      1 /*	$NetBSD: if_wm.c,v 1.647 2019/09/04 07:07:09 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
      76  *	- TX multi-queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.647 2019/09/04 07:07:09 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 
    109 #include <sys/rndsource.h>
    110 
    111 #include <net/if.h>
    112 #include <net/if_dl.h>
    113 #include <net/if_media.h>
    114 #include <net/if_ether.h>
    115 
    116 #include <net/bpf.h>
    117 
    118 #include <net/rss_config.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/mdio.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
    160 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    161 #else
    162 #define	DPRINTF(x, y)	__nothing
    163 #endif /* WM_DEBUG */
    164 
    165 #ifdef NET_MPSAFE
    166 #define WM_MPSAFE	1
    167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    168 #else
    169 #define CALLOUT_FLAGS	0
    170 #endif
    171 
    172 /*
     173  * Maximum number of interrupts for this driver.
    174  */
    175 #define WM_MAX_NQUEUEINTR	16
    176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    177 
    178 #ifndef WM_DISABLE_MSI
    179 #define	WM_DISABLE_MSI 0
    180 #endif
    181 #ifndef WM_DISABLE_MSIX
    182 #define	WM_DISABLE_MSIX 0
    183 #endif
    184 
    185 int wm_disable_msi = WM_DISABLE_MSI;
    186 int wm_disable_msix = WM_DISABLE_MSIX;
    187 
    188 #ifndef WM_WATCHDOG_TIMEOUT
    189 #define WM_WATCHDOG_TIMEOUT 5
    190 #endif
    191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    192 
    193 /*
    194  * Transmit descriptor list size.  Due to errata, we can only have
    195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    196  * on >= 82544. We tell the upper layers that they can queue a lot
    197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    198  * of them at a time.
    199  *
    200  * We allow up to 64 DMA segments per packet.  Pathological packet
    201  * chains containing many small mbufs have been observed in zero-copy
     202  * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
     203  * segments, m_defrag() is called to reduce it (sketched after the defines below).
    204  */
    205 #define	WM_NTXSEGS		64
    206 #define	WM_IFQUEUELEN		256
    207 #define	WM_TXQUEUELEN_MAX	64
    208 #define	WM_TXQUEUELEN_MAX_82547	16
    209 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    210 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    211 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    212 #define	WM_NTXDESC_82542	256
    213 #define	WM_NTXDESC_82544	4096
    214 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    215 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    216 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    217 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    218 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    219 
    220 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    221 
    222 #define	WM_TXINTERQSIZE		256
    223 
    224 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    225 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    226 #endif
    227 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    228 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    229 #endif
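
/*
 * Illustrative sketch of the m_defrag() recovery pattern mentioned above
 * (the real logic lives in the send routines, and the local variable
 * names here are illustrative only).  WM_NTXSEGS bounds the Tx DMA maps,
 * so bus_dmamap_load_mbuf() fails with EFBIG on a chain with too many
 * segments:
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m = m_defrag(m0, M_NOWAIT);

		if (m == NULL)
			goto drop;	/* couldn't compact the chain */
		m0 = m;			/* retry with the compacted chain */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	}
#endif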
    230 
    231 /*
     232  * Receive descriptor list size.  We have one Rx buffer for normal-
     233  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    234  * packet.  We allocate 256 receive descriptors, each with a 2k
    235  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    236  */
    237 #define	WM_NRXDESC		256U
    238 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    239 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    240 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    241 
    242 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    243 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    244 #endif
    245 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    246 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    247 #endif
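
/*
 * Because WM_NRXDESC is a power of two, the index macros above wrap with
 * a cheap mask instead of a modulo operation, e.g. (illustrative only):
 */
#if 0
	KASSERT(WM_NEXTRX(WM_NRXDESC - 1) == 0);
	KASSERT(WM_PREVRX(0) == WM_NRXDESC - 1);
#endif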
    248 
    249 typedef union txdescs {
    250 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    251 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    252 } txdescs_t;
    253 
    254 typedef union rxdescs {
    255 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    256 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    257 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    258 } rxdescs_t;
    259 
    260 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    261 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    262 
    263 /*
    264  * Software state for transmit jobs.
    265  */
    266 struct wm_txsoft {
    267 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    268 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    269 	int txs_firstdesc;		/* first descriptor in packet */
    270 	int txs_lastdesc;		/* last descriptor in packet */
    271 	int txs_ndesc;			/* # of descriptors used */
    272 };
    273 
    274 /*
    275  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    276  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    277  * them together.
    278  */
    279 struct wm_rxsoft {
    280 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    281 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    282 };
    283 
    284 #define WM_LINKUP_TIMEOUT	50
    285 
    286 static uint16_t swfwphysem[] = {
    287 	SWFW_PHY0_SM,
    288 	SWFW_PHY1_SM,
    289 	SWFW_PHY2_SM,
    290 	SWFW_PHY3_SM
    291 };
    292 
    293 static const uint32_t wm_82580_rxpbs_table[] = {
    294 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    295 };
    296 
    297 struct wm_softc;
    298 
    299 #ifdef WM_EVENT_COUNTERS
    300 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    301 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    302 	struct evcnt qname##_ev_##evname;
    303 
    304 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    305 	do {								\
    306 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    307 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    308 		    "%s%02d%s", #qname, (qnum), #evname);		\
    309 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    310 		    (evtype), NULL, (xname),				\
    311 		    (q)->qname##_##evname##_evcnt_name);		\
    312 	} while (0)
    313 
    314 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    315 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    316 
    317 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    318 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    319 
    320 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    321 	evcnt_detach(&(q)->qname##_ev_##evname);
    322 #endif /* WM_EVENT_COUNTERS */
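
/*
 * For reference, WM_Q_EVCNT_DEFINE(txq, txdw) expands to:
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * The "##" inside the string literal is not token-pasted, so the buffer
 * is a fixed 18 bytes, large enough for every name generated here;
 * WM_Q_EVCNT_ATTACH() then snprintf()s a name such as "txq00txdw" into
 * that buffer before calling evcnt_attach_dynamic(9).
 */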
    323 
    324 struct wm_txqueue {
    325 	kmutex_t *txq_lock;		/* lock for tx operations */
    326 
    327 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    328 
    329 	/* Software state for the transmit descriptors. */
    330 	int txq_num;			/* must be a power of two */
    331 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    332 
    333 	/* TX control data structures. */
    334 	int txq_ndesc;			/* must be a power of two */
     335 	size_t txq_descsize;		/* size of a Tx descriptor */
    336 	txdescs_t *txq_descs_u;
    337 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    338 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     339 	int txq_desc_rseg;		/* real number of control segments */
    340 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    341 #define	txq_descs	txq_descs_u->sctxu_txdescs
    342 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    343 
    344 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    345 
    346 	int txq_free;			/* number of free Tx descriptors */
    347 	int txq_next;			/* next ready Tx descriptor */
    348 
    349 	int txq_sfree;			/* number of free Tx jobs */
    350 	int txq_snext;			/* next free Tx job */
    351 	int txq_sdirty;			/* dirty Tx jobs */
    352 
    353 	/* These 4 variables are used only on the 82547. */
    354 	int txq_fifo_size;		/* Tx FIFO size */
    355 	int txq_fifo_head;		/* current head of FIFO */
    356 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    357 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    358 
    359 	/*
    360 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     361 	 * CPUs. This pcq mediates between them without blocking.
    362 	 */
    363 	pcq_t *txq_interq;
    364 
    365 	/*
     366 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     367 	 * to manage the Tx H/W queue's busy flag.
    368 	 */
    369 	int txq_flags;			/* flags for H/W queue, see below */
    370 #define	WM_TXQ_NO_SPACE	0x1
    371 
    372 	bool txq_stopping;
    373 
    374 	bool txq_sending;
    375 	time_t txq_lastsent;
    376 
    377 	uint32_t txq_packets;		/* for AIM */
    378 	uint32_t txq_bytes;		/* for AIM */
    379 #ifdef WM_EVENT_COUNTERS
    380 	/* TX event counters */
    381 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    382 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    383 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    384 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    385 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    386 					    /* XXX not used? */
    387 
    388 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    389 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    390 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    391 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    392 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    393 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
    394 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    395 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    396 					    /* other than toomanyseg */
    397 
    398 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
    399 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    400 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    401 
    402 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    403 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    404 #endif /* WM_EVENT_COUNTERS */
    405 };
    406 
    407 struct wm_rxqueue {
    408 	kmutex_t *rxq_lock;		/* lock for rx operations */
    409 
    410 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    411 
    412 	/* Software state for the receive descriptors. */
    413 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    414 
    415 	/* RX control data structures. */
    416 	int rxq_ndesc;			/* must be a power of two */
     417 	size_t rxq_descsize;		/* size of an Rx descriptor */
    418 	rxdescs_t *rxq_descs_u;
    419 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    420 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     421 	int rxq_desc_rseg;		/* real number of control segments */
    422 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    423 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    424 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    425 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    426 
    427 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    428 
    429 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    430 	int rxq_discard;
    431 	int rxq_len;
    432 	struct mbuf *rxq_head;
    433 	struct mbuf *rxq_tail;
    434 	struct mbuf **rxq_tailp;
    435 
    436 	bool rxq_stopping;
    437 
    438 	uint32_t rxq_packets;		/* for AIM */
    439 	uint32_t rxq_bytes;		/* for AIM */
    440 #ifdef WM_EVENT_COUNTERS
    441 	/* RX event counters */
    442 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    443 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    444 
    445 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    446 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    447 #endif
    448 };
    449 
    450 struct wm_queue {
    451 	int wmq_id;			/* index of TX/RX queues */
    452 	int wmq_intr_idx;		/* index of MSI-X tables */
    453 
    454 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    455 	bool wmq_set_itr;
    456 
    457 	struct wm_txqueue wmq_txq;
    458 	struct wm_rxqueue wmq_rxq;
    459 
    460 	void *wmq_si;
    461 	krndsource_t rnd_source;	/* random source */
    462 };
    463 
    464 struct wm_phyop {
    465 	int (*acquire)(struct wm_softc *);
    466 	void (*release)(struct wm_softc *);
    467 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    468 	int (*writereg_locked)(device_t, int, int, uint16_t);
    469 	int reset_delay_us;
    470 };
    471 
    472 struct wm_nvmop {
    473 	int (*acquire)(struct wm_softc *);
    474 	void (*release)(struct wm_softc *);
    475 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    476 };
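
/*
 * These op tables let the chip-specific attach code select the right
 * semaphore and register-access routines once; callers then follow one
 * pattern.  Illustrative sketch (error handling trimmed, PHY address
 * and register assumed):
 */
#if 0
	uint16_t val;
	int rv;

	if ((rv = sc->phy.acquire(sc)) != 0)
		return rv;		/* couldn't take the semaphore */
	rv = sc->phy.readreg_locked(sc->sc_dev, 1, MII_BMSR, &val);
	sc->phy.release(sc);
#endif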
    477 
    478 /*
    479  * Software state per device.
    480  */
    481 struct wm_softc {
    482 	device_t sc_dev;		/* generic device information */
    483 	bus_space_tag_t sc_st;		/* bus space tag */
    484 	bus_space_handle_t sc_sh;	/* bus space handle */
    485 	bus_size_t sc_ss;		/* bus space size */
    486 	bus_space_tag_t sc_iot;		/* I/O space tag */
    487 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    488 	bus_size_t sc_ios;		/* I/O space size */
    489 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    490 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    491 	bus_size_t sc_flashs;		/* flash registers space size */
    492 	off_t sc_flashreg_offset;	/*
    493 					 * offset to flash registers from
    494 					 * start of BAR
    495 					 */
    496 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    497 
    498 	struct ethercom sc_ethercom;	/* ethernet common data */
    499 	struct mii_data sc_mii;		/* MII/media information */
    500 
    501 	pci_chipset_tag_t sc_pc;
    502 	pcitag_t sc_pcitag;
    503 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    504 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    505 
    506 	uint16_t sc_pcidevid;		/* PCI device ID */
    507 	wm_chip_type sc_type;		/* MAC type */
    508 	int sc_rev;			/* MAC revision */
    509 	wm_phy_type sc_phytype;		/* PHY type */
    510 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    511 #define	WM_MEDIATYPE_UNKNOWN		0x00
    512 #define	WM_MEDIATYPE_FIBER		0x01
    513 #define	WM_MEDIATYPE_COPPER		0x02
    514 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    515 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    516 	int sc_flags;			/* flags; see below */
    517 	int sc_if_flags;		/* last if_flags */
    518 	int sc_ec_capenable;		/* last ec_capenable */
    519 	int sc_flowflags;		/* 802.3x flow control flags */
    520 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    521 	int sc_align_tweak;
    522 
    523 	void *sc_ihs[WM_MAX_NINTR];	/*
    524 					 * interrupt cookie.
    525 					 * - legacy and msi use sc_ihs[0] only
    526 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    527 					 */
    528 	pci_intr_handle_t *sc_intrs;	/*
    529 					 * legacy and msi use sc_intrs[0] only
     530 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    531 					 */
    532 	int sc_nintrs;			/* number of interrupts */
    533 
    534 	int sc_link_intr_idx;		/* index of MSI-X tables */
    535 
    536 	callout_t sc_tick_ch;		/* tick callout */
    537 	bool sc_core_stopping;
    538 
    539 	int sc_nvm_ver_major;
    540 	int sc_nvm_ver_minor;
    541 	int sc_nvm_ver_build;
    542 	int sc_nvm_addrbits;		/* NVM address bits */
    543 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    544 	int sc_ich8_flash_base;
    545 	int sc_ich8_flash_bank_size;
    546 	int sc_nvm_k1_enabled;
    547 
    548 	int sc_nqueues;
    549 	struct wm_queue *sc_queue;
    550 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    551 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    552 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    553 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    554 
    555 	int sc_affinity_offset;
    556 
    557 #ifdef WM_EVENT_COUNTERS
    558 	/* Event counters. */
    559 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    560 
    561 	/* WM_T_82542_2_1 only */
    562 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    563 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    564 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    565 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    566 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    567 #endif /* WM_EVENT_COUNTERS */
    568 
     569 	/* This variable is used only on the 82547. */
    570 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    571 
    572 	uint32_t sc_ctrl;		/* prototype CTRL register */
    573 #if 0
    574 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    575 #endif
    576 	uint32_t sc_icr;		/* prototype interrupt bits */
    577 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    578 	uint32_t sc_tctl;		/* prototype TCTL register */
    579 	uint32_t sc_rctl;		/* prototype RCTL register */
    580 	uint32_t sc_txcw;		/* prototype TXCW register */
    581 	uint32_t sc_tipg;		/* prototype TIPG register */
    582 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    583 	uint32_t sc_pba;		/* prototype PBA register */
    584 
    585 	int sc_tbi_linkup;		/* TBI link status */
    586 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    587 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    588 
    589 	int sc_mchash_type;		/* multicast filter offset */
    590 
    591 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    592 
    593 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    594 	kmutex_t *sc_ich_phymtx;	/*
    595 					 * 82574/82583/ICH/PCH specific PHY
    596 					 * mutex. For 82574/82583, the mutex
    597 					 * is used for both PHY and NVM.
    598 					 */
    599 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    600 
    601 	struct wm_phyop phy;
    602 	struct wm_nvmop nvm;
    603 };
    604 
    605 #define WM_CORE_LOCK(_sc)						\
    606 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    607 #define WM_CORE_UNLOCK(_sc)						\
    608 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    609 #define WM_CORE_LOCKED(_sc)						\
    610 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    611 
    612 #define	WM_RXCHAIN_RESET(rxq)						\
    613 do {									\
    614 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    615 	*(rxq)->rxq_tailp = NULL;					\
    616 	(rxq)->rxq_len = 0;						\
    617 } while (/*CONSTCOND*/0)
    618 
    619 #define	WM_RXCHAIN_LINK(rxq, m)						\
    620 do {									\
    621 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    622 	(rxq)->rxq_tailp = &(m)->m_next;				\
    623 } while (/*CONSTCOND*/0)
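
/*
 * rxq_tailp always points at the location (rxq_head or the previous
 * mbuf's m_next) that the next fragment must be stored through, making
 * WM_RXCHAIN_LINK() an O(1) append with no list walk.  Equivalently:
 */
#if 0
	*rxq->rxq_tailp = m;		/* hook m onto the chain */
	rxq->rxq_tail = m;
	rxq->rxq_tailp = &m->m_next;	/* next append writes through here */
#endif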
    624 
    625 #ifdef WM_EVENT_COUNTERS
    626 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    627 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    628 
    629 #define WM_Q_EVCNT_INCR(qname, evname)			\
    630 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    631 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    632 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    633 #else /* !WM_EVENT_COUNTERS */
    634 #define	WM_EVCNT_INCR(ev)	/* nothing */
    635 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    636 
    637 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    638 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    639 #endif /* !WM_EVENT_COUNTERS */
    640 
    641 #define	CSR_READ(sc, reg)						\
    642 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    643 #define	CSR_WRITE(sc, reg, val)						\
    644 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    645 #define	CSR_WRITE_FLUSH(sc)						\
    646 	(void)CSR_READ((sc), WMREG_STATUS)
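
/*
 * CSR_WRITE_FLUSH() reads a harmless register (STATUS) to push posted
 * PCI writes out to the device, typically before a timed wait.
 * Illustrative usage only:
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);	/* ensure the reset write reached the chip */
	delay(10000);
#endif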
    647 
    648 #define ICH8_FLASH_READ32(sc, reg)					\
    649 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    650 	    (reg) + sc->sc_flashreg_offset)
    651 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    652 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    653 	    (reg) + sc->sc_flashreg_offset, (data))
    654 
    655 #define ICH8_FLASH_READ16(sc, reg)					\
    656 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    657 	    (reg) + sc->sc_flashreg_offset)
    658 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    659 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    660 	    (reg) + sc->sc_flashreg_offset, (data))
    661 
    662 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    663 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    664 
    665 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    666 #define	WM_CDTXADDR_HI(txq, x)						\
    667 	(sizeof(bus_addr_t) == 8 ?					\
    668 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    669 
    670 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    671 #define	WM_CDRXADDR_HI(rxq, x)						\
    672 	(sizeof(bus_addr_t) == 8 ?					\
    673 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
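
/*
 * The _LO/_HI macros split a 64-bit descriptor ring address into the two
 * 32-bit halves the hardware expects; with a 32-bit bus_addr_t the high
 * half is simply 0.  wm_set_dma_addr() (declared below) applies the same
 * split to buffer addresses inside descriptors, roughly:
 */
#if 0
	wa->wa_low = htole32(v & 0xffffffffU);
	wa->wa_high = (sizeof(bus_addr_t) == 8) ?
	    htole32((uint64_t)v >> 32) : 0;
#endif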
    674 
    675 /*
    676  * Register read/write functions.
    677  * Other than CSR_{READ|WRITE}().
    678  */
    679 #if 0
    680 static inline uint32_t wm_io_read(struct wm_softc *, int);
    681 #endif
    682 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    683 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    684     uint32_t, uint32_t);
    685 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    686 
    687 /*
    688  * Descriptor sync/init functions.
    689  */
    690 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    691 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    692 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    693 
    694 /*
    695  * Device driver interface functions and commonly used functions.
    696  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    697  */
    698 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    699 static int	wm_match(device_t, cfdata_t, void *);
    700 static void	wm_attach(device_t, device_t, void *);
    701 static int	wm_detach(device_t, int);
    702 static bool	wm_suspend(device_t, const pmf_qual_t *);
    703 static bool	wm_resume(device_t, const pmf_qual_t *);
    704 static void	wm_watchdog(struct ifnet *);
    705 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    706     uint16_t *);
    707 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    708     uint16_t *);
    709 static void	wm_tick(void *);
    710 static int	wm_ifflags_cb(struct ethercom *);
    711 static int	wm_ioctl(struct ifnet *, u_long, void *);
    712 /* MAC address related */
    713 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    714 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    715 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    716 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    717 static int	wm_rar_count(struct wm_softc *);
    718 static void	wm_set_filter(struct wm_softc *);
    719 /* Reset and init related */
    720 static void	wm_set_vlan(struct wm_softc *);
    721 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    722 static void	wm_get_auto_rd_done(struct wm_softc *);
    723 static void	wm_lan_init_done(struct wm_softc *);
    724 static void	wm_get_cfg_done(struct wm_softc *);
    725 static int	wm_phy_post_reset(struct wm_softc *);
    726 static int	wm_write_smbus_addr(struct wm_softc *);
    727 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    728 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    729 static void	wm_initialize_hardware_bits(struct wm_softc *);
    730 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    731 static int	wm_reset_phy(struct wm_softc *);
    732 static void	wm_flush_desc_rings(struct wm_softc *);
    733 static void	wm_reset(struct wm_softc *);
    734 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    735 static void	wm_rxdrain(struct wm_rxqueue *);
    736 static void	wm_init_rss(struct wm_softc *);
    737 static void	wm_adjust_qnum(struct wm_softc *, int);
    738 static inline bool	wm_is_using_msix(struct wm_softc *);
    739 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    740 static int	wm_softint_establish(struct wm_softc *, int, int);
    741 static int	wm_setup_legacy(struct wm_softc *);
    742 static int	wm_setup_msix(struct wm_softc *);
    743 static int	wm_init(struct ifnet *);
    744 static int	wm_init_locked(struct ifnet *);
    745 static void	wm_unset_stopping_flags(struct wm_softc *);
    746 static void	wm_set_stopping_flags(struct wm_softc *);
    747 static void	wm_stop(struct ifnet *, int);
    748 static void	wm_stop_locked(struct ifnet *, int);
    749 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    750 static void	wm_82547_txfifo_stall(void *);
    751 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    752 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    753 /* DMA related */
    754 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    755 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    756 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    757 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    758     struct wm_txqueue *);
    759 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    760 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    761 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    762     struct wm_rxqueue *);
    763 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    764 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    765 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    766 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    767 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    768 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    769 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    770     struct wm_txqueue *);
    771 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    772     struct wm_rxqueue *);
    773 static int	wm_alloc_txrx_queues(struct wm_softc *);
    774 static void	wm_free_txrx_queues(struct wm_softc *);
    775 static int	wm_init_txrx_queues(struct wm_softc *);
    776 /* Start */
    777 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    778     struct wm_txsoft *, uint32_t *, uint8_t *);
    779 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    780 static void	wm_start(struct ifnet *);
    781 static void	wm_start_locked(struct ifnet *);
    782 static int	wm_transmit(struct ifnet *, struct mbuf *);
    783 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    784 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    785     bool);
    786 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    787     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    788 static void	wm_nq_start(struct ifnet *);
    789 static void	wm_nq_start_locked(struct ifnet *);
    790 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    791 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    792 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    793     bool);
    794 static void	wm_deferred_start_locked(struct wm_txqueue *);
    795 static void	wm_handle_queue(void *);
    796 /* Interrupt */
    797 static bool	wm_txeof(struct wm_txqueue *, u_int);
    798 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    799 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    800 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    801 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    802 static void	wm_linkintr(struct wm_softc *, uint32_t);
    803 static int	wm_intr_legacy(void *);
    804 static inline void	wm_txrxintr_disable(struct wm_queue *);
    805 static inline void	wm_txrxintr_enable(struct wm_queue *);
    806 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    807 static int	wm_txrxintr_msix(void *);
    808 static int	wm_linkintr_msix(void *);
    809 
    810 /*
    811  * Media related.
    812  * GMII, SGMII, TBI, SERDES and SFP.
    813  */
    814 /* Common */
    815 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    816 /* GMII related */
    817 static void	wm_gmii_reset(struct wm_softc *);
    818 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    819 static int	wm_get_phy_id_82575(struct wm_softc *);
    820 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    821 static int	wm_gmii_mediachange(struct ifnet *);
    822 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    823 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    824 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    825 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    826 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    827 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    828 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    829 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    830 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    831 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    832 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    833 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    834 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    835 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    836 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    837 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    838 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    839 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    840 	bool);
    841 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    842 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    843 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    844 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    845 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    846 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    847 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    848 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    849 static void	wm_gmii_statchg(struct ifnet *);
    850 /*
     851  * Kumeran related (80003, ICH* and PCH*).
     852  * These functions are not for accessing MII registers but for accessing
     853  * Kumeran-specific registers.
    854  */
    855 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    856 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    857 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    858 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    859 /* EMI register related */
    860 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    861 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    862 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    863 /* SGMII */
    864 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    865 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    866 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    867 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    868 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    869 /* TBI related */
    870 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    871 static void	wm_tbi_mediainit(struct wm_softc *);
    872 static int	wm_tbi_mediachange(struct ifnet *);
    873 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    874 static int	wm_check_for_link(struct wm_softc *);
    875 static void	wm_tbi_tick(struct wm_softc *);
    876 /* SERDES related */
    877 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    878 static int	wm_serdes_mediachange(struct ifnet *);
    879 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    880 static void	wm_serdes_tick(struct wm_softc *);
    881 /* SFP related */
    882 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    883 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    884 
    885 /*
    886  * NVM related.
     887  * Microwire, SPI (with or without EERD) and Flash.
    888  */
    889 /* Misc functions */
    890 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    891 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    892 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    893 /* Microwire */
    894 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    895 /* SPI */
    896 static int	wm_nvm_ready_spi(struct wm_softc *);
    897 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    898 /* Using with EERD */
    899 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    900 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    901 /* Flash */
    902 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    903     unsigned int *);
    904 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    905 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    906 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    907     uint32_t *);
    908 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    909 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    910 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    911 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    912 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    913 /* iNVM */
    914 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    915 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    916 /* Lock, detecting NVM type, validate checksum and read */
    917 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    918 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    919 static int	wm_nvm_validate_checksum(struct wm_softc *);
    920 static void	wm_nvm_version_invm(struct wm_softc *);
    921 static void	wm_nvm_version(struct wm_softc *);
    922 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    923 
    924 /*
    925  * Hardware semaphores.
     926  * Very complex...
    927  */
    928 static int	wm_get_null(struct wm_softc *);
    929 static void	wm_put_null(struct wm_softc *);
    930 static int	wm_get_eecd(struct wm_softc *);
    931 static void	wm_put_eecd(struct wm_softc *);
    932 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    933 static void	wm_put_swsm_semaphore(struct wm_softc *);
    934 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    935 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    936 static int	wm_get_nvm_80003(struct wm_softc *);
    937 static void	wm_put_nvm_80003(struct wm_softc *);
    938 static int	wm_get_nvm_82571(struct wm_softc *);
    939 static void	wm_put_nvm_82571(struct wm_softc *);
    940 static int	wm_get_phy_82575(struct wm_softc *);
    941 static void	wm_put_phy_82575(struct wm_softc *);
    942 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    943 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    944 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    945 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    946 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    947 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    948 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    949 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    950 
    951 /*
    952  * Management mode and power management related subroutines.
    953  * BMC, AMT, suspend/resume and EEE.
    954  */
    955 #if 0
    956 static int	wm_check_mng_mode(struct wm_softc *);
    957 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    958 static int	wm_check_mng_mode_82574(struct wm_softc *);
    959 static int	wm_check_mng_mode_generic(struct wm_softc *);
    960 #endif
    961 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    962 static bool	wm_phy_resetisblocked(struct wm_softc *);
    963 static void	wm_get_hw_control(struct wm_softc *);
    964 static void	wm_release_hw_control(struct wm_softc *);
    965 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    966 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
    967 static void	wm_init_manageability(struct wm_softc *);
    968 static void	wm_release_manageability(struct wm_softc *);
    969 static void	wm_get_wakeup(struct wm_softc *);
    970 static int	wm_ulp_disable(struct wm_softc *);
    971 static int	wm_enable_phy_wakeup(struct wm_softc *);
    972 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    973 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
    974 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
    975 static void	wm_enable_wakeup(struct wm_softc *);
    976 static void	wm_disable_aspm(struct wm_softc *);
    977 /* LPLU (Low Power Link Up) */
    978 static void	wm_lplu_d0_disable(struct wm_softc *);
    979 /* EEE */
    980 static int	wm_set_eee_i350(struct wm_softc *);
    981 static int	wm_set_eee_pchlan(struct wm_softc *);
    982 static int	wm_set_eee(struct wm_softc *);
    983 
    984 /*
    985  * Workarounds (mainly PHY related).
     986  * Basically, PHY workarounds are implemented in the PHY drivers.
    987  */
    988 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    989 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    990 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
    991 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
    992 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
    993 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
    994 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    995 static int	wm_k1_workaround_lv(struct wm_softc *);
    996 static int	wm_link_stall_workaround_hv(struct wm_softc *);
    997 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    998 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    999 static void	wm_reset_init_script_82575(struct wm_softc *);
   1000 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1001 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1002 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1003 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1004 static int	wm_pll_workaround_i210(struct wm_softc *);
   1005 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1006 
   1007 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1008     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1009 
   1010 /*
   1011  * Devices supported by this driver.
   1012  */
   1013 static const struct wm_product {
   1014 	pci_vendor_id_t		wmp_vendor;
   1015 	pci_product_id_t	wmp_product;
   1016 	const char		*wmp_name;
   1017 	wm_chip_type		wmp_type;
   1018 	uint32_t		wmp_flags;
   1019 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1020 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1021 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1022 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1023 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1024 } wm_products[] = {
   1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1026 	  "Intel i82542 1000BASE-X Ethernet",
   1027 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1028 
   1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1030 	  "Intel i82543GC 1000BASE-X Ethernet",
   1031 	  WM_T_82543,		WMP_F_FIBER },
   1032 
   1033 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1034 	  "Intel i82543GC 1000BASE-T Ethernet",
   1035 	  WM_T_82543,		WMP_F_COPPER },
   1036 
   1037 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1038 	  "Intel i82544EI 1000BASE-T Ethernet",
   1039 	  WM_T_82544,		WMP_F_COPPER },
   1040 
   1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1042 	  "Intel i82544EI 1000BASE-X Ethernet",
   1043 	  WM_T_82544,		WMP_F_FIBER },
   1044 
   1045 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1046 	  "Intel i82544GC 1000BASE-T Ethernet",
   1047 	  WM_T_82544,		WMP_F_COPPER },
   1048 
   1049 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1050 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1051 	  WM_T_82544,		WMP_F_COPPER },
   1052 
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1054 	  "Intel i82540EM 1000BASE-T Ethernet",
   1055 	  WM_T_82540,		WMP_F_COPPER },
   1056 
   1057 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1058 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1059 	  WM_T_82540,		WMP_F_COPPER },
   1060 
   1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1062 	  "Intel i82540EP 1000BASE-T Ethernet",
   1063 	  WM_T_82540,		WMP_F_COPPER },
   1064 
   1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1066 	  "Intel i82540EP 1000BASE-T Ethernet",
   1067 	  WM_T_82540,		WMP_F_COPPER },
   1068 
   1069 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1070 	  "Intel i82540EP 1000BASE-T Ethernet",
   1071 	  WM_T_82540,		WMP_F_COPPER },
   1072 
   1073 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1074 	  "Intel i82545EM 1000BASE-T Ethernet",
   1075 	  WM_T_82545,		WMP_F_COPPER },
   1076 
   1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1078 	  "Intel i82545GM 1000BASE-T Ethernet",
   1079 	  WM_T_82545_3,		WMP_F_COPPER },
   1080 
   1081 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1082 	  "Intel i82545GM 1000BASE-X Ethernet",
   1083 	  WM_T_82545_3,		WMP_F_FIBER },
   1084 
   1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1086 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1087 	  WM_T_82545_3,		WMP_F_SERDES },
   1088 
   1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1090 	  "Intel i82546EB 1000BASE-T Ethernet",
   1091 	  WM_T_82546,		WMP_F_COPPER },
   1092 
   1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1094 	  "Intel i82546EB 1000BASE-T Ethernet",
   1095 	  WM_T_82546,		WMP_F_COPPER },
   1096 
   1097 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1098 	  "Intel i82545EM 1000BASE-X Ethernet",
   1099 	  WM_T_82545,		WMP_F_FIBER },
   1100 
   1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1102 	  "Intel i82546EB 1000BASE-X Ethernet",
   1103 	  WM_T_82546,		WMP_F_FIBER },
   1104 
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1106 	  "Intel i82546GB 1000BASE-T Ethernet",
   1107 	  WM_T_82546_3,		WMP_F_COPPER },
   1108 
   1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1110 	  "Intel i82546GB 1000BASE-X Ethernet",
   1111 	  WM_T_82546_3,		WMP_F_FIBER },
   1112 
   1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1114 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1115 	  WM_T_82546_3,		WMP_F_SERDES },
   1116 
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1118 	  "i82546GB quad-port Gigabit Ethernet",
   1119 	  WM_T_82546_3,		WMP_F_COPPER },
   1120 
   1121 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1122 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1123 	  WM_T_82546_3,		WMP_F_COPPER },
   1124 
   1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1126 	  "Intel PRO/1000MT (82546GB)",
   1127 	  WM_T_82546_3,		WMP_F_COPPER },
   1128 
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1130 	  "Intel i82541EI 1000BASE-T Ethernet",
   1131 	  WM_T_82541,		WMP_F_COPPER },
   1132 
   1133 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1134 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1135 	  WM_T_82541,		WMP_F_COPPER },
   1136 
   1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1138 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1139 	  WM_T_82541,		WMP_F_COPPER },
   1140 
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1142 	  "Intel i82541ER 1000BASE-T Ethernet",
   1143 	  WM_T_82541_2,		WMP_F_COPPER },
   1144 
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1146 	  "Intel i82541GI 1000BASE-T Ethernet",
   1147 	  WM_T_82541_2,		WMP_F_COPPER },
   1148 
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1150 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1151 	  WM_T_82541_2,		WMP_F_COPPER },
   1152 
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1154 	  "Intel i82541PI 1000BASE-T Ethernet",
   1155 	  WM_T_82541_2,		WMP_F_COPPER },
   1156 
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1158 	  "Intel i82547EI 1000BASE-T Ethernet",
   1159 	  WM_T_82547,		WMP_F_COPPER },
   1160 
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1162 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1163 	  WM_T_82547,		WMP_F_COPPER },
   1164 
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1166 	  "Intel i82547GI 1000BASE-T Ethernet",
   1167 	  WM_T_82547_2,		WMP_F_COPPER },
   1168 
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1170 	  "Intel PRO/1000 PT (82571EB)",
   1171 	  WM_T_82571,		WMP_F_COPPER },
   1172 
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1174 	  "Intel PRO/1000 PF (82571EB)",
   1175 	  WM_T_82571,		WMP_F_FIBER },
   1176 
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1178 	  "Intel PRO/1000 PB (82571EB)",
   1179 	  WM_T_82571,		WMP_F_SERDES },
   1180 
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1182 	  "Intel PRO/1000 QT (82571EB)",
   1183 	  WM_T_82571,		WMP_F_COPPER },
   1184 
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1186 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1187 	  WM_T_82571,		WMP_F_COPPER, },
   1188 
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1190 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1191 	  WM_T_82571,		WMP_F_COPPER, },
   1192 
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1194 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1195 	  WM_T_82571,		WMP_F_SERDES, },
   1196 
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1198 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1199 	  WM_T_82571,		WMP_F_SERDES, },
   1200 
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1202 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1203 	  WM_T_82571,		WMP_F_FIBER, },
   1204 
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1206 	  "Intel i82572EI 1000baseT Ethernet",
   1207 	  WM_T_82572,		WMP_F_COPPER },
   1208 
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1210 	  "Intel i82572EI 1000baseX Ethernet",
   1211 	  WM_T_82572,		WMP_F_FIBER },
   1212 
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1214 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1215 	  WM_T_82572,		WMP_F_SERDES },
   1216 
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1218 	  "Intel i82572EI 1000baseT Ethernet",
   1219 	  WM_T_82572,		WMP_F_COPPER },
   1220 
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1222 	  "Intel i82573E",
   1223 	  WM_T_82573,		WMP_F_COPPER },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1226 	  "Intel i82573E IAMT",
   1227 	  WM_T_82573,		WMP_F_COPPER },
   1228 
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1230 	  "Intel i82573L Gigabit Ethernet",
   1231 	  WM_T_82573,		WMP_F_COPPER },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1234 	  "Intel i82574L",
   1235 	  WM_T_82574,		WMP_F_COPPER },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1238 	  "Intel i82574L",
   1239 	  WM_T_82574,		WMP_F_COPPER },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1242 	  "Intel i82583V",
   1243 	  WM_T_82583,		WMP_F_COPPER },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1246 	  "i80003 dual 1000baseT Ethernet",
   1247 	  WM_T_80003,		WMP_F_COPPER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1250 	  "i80003 dual 1000baseX Ethernet",
   1251 	  WM_T_80003,		WMP_F_COPPER },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1254 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1255 	  WM_T_80003,		WMP_F_SERDES },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1258 	  "Intel i80003 1000baseT Ethernet",
   1259 	  WM_T_80003,		WMP_F_COPPER },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1262 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1263 	  WM_T_80003,		WMP_F_SERDES },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1266 	  "Intel i82801H (M_AMT) LAN Controller",
   1267 	  WM_T_ICH8,		WMP_F_COPPER },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1269 	  "Intel i82801H (AMT) LAN Controller",
   1270 	  WM_T_ICH8,		WMP_F_COPPER },
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1272 	  "Intel i82801H LAN Controller",
   1273 	  WM_T_ICH8,		WMP_F_COPPER },
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1275 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1276 	  WM_T_ICH8,		WMP_F_COPPER },
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1278 	  "Intel i82801H (M) LAN Controller",
   1279 	  WM_T_ICH8,		WMP_F_COPPER },
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1281 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1282 	  WM_T_ICH8,		WMP_F_COPPER },
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1284 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1285 	  WM_T_ICH8,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1287 	  "82567V-3 LAN Controller",
   1288 	  WM_T_ICH8,		WMP_F_COPPER },
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1290 	  "82801I (AMT) LAN Controller",
   1291 	  WM_T_ICH9,		WMP_F_COPPER },
   1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1293 	  "82801I 10/100 LAN Controller",
   1294 	  WM_T_ICH9,		WMP_F_COPPER },
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1296 	  "82801I (G) 10/100 LAN Controller",
   1297 	  WM_T_ICH9,		WMP_F_COPPER },
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1299 	  "82801I (GT) 10/100 LAN Controller",
   1300 	  WM_T_ICH9,		WMP_F_COPPER },
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1302 	  "82801I (C) LAN Controller",
   1303 	  WM_T_ICH9,		WMP_F_COPPER },
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1305 	  "82801I mobile LAN Controller",
   1306 	  WM_T_ICH9,		WMP_F_COPPER },
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1308 	  "82801I mobile (V) LAN Controller",
   1309 	  WM_T_ICH9,		WMP_F_COPPER },
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1311 	  "82801I mobile (AMT) LAN Controller",
   1312 	  WM_T_ICH9,		WMP_F_COPPER },
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1314 	  "82567LM-4 LAN Controller",
   1315 	  WM_T_ICH9,		WMP_F_COPPER },
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1317 	  "82567LM-2 LAN Controller",
   1318 	  WM_T_ICH10,		WMP_F_COPPER },
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1320 	  "82567LF-2 LAN Controller",
   1321 	  WM_T_ICH10,		WMP_F_COPPER },
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1323 	  "82567LM-3 LAN Controller",
   1324 	  WM_T_ICH10,		WMP_F_COPPER },
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1326 	  "82567LF-3 LAN Controller",
   1327 	  WM_T_ICH10,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1329 	  "82567V-2 LAN Controller",
   1330 	  WM_T_ICH10,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1332 	  "82567V-3? LAN Controller",
   1333 	  WM_T_ICH10,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1335 	  "HANKSVILLE LAN Controller",
   1336 	  WM_T_ICH10,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1338 	  "PCH LAN (82577LM) Controller",
   1339 	  WM_T_PCH,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1341 	  "PCH LAN (82577LC) Controller",
   1342 	  WM_T_PCH,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1344 	  "PCH LAN (82578DM) Controller",
   1345 	  WM_T_PCH,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1347 	  "PCH LAN (82578DC) Controller",
   1348 	  WM_T_PCH,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1350 	  "PCH2 LAN (82579LM) Controller",
   1351 	  WM_T_PCH2,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1353 	  "PCH2 LAN (82579V) Controller",
   1354 	  WM_T_PCH2,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1356 	  "82575EB dual-1000baseT Ethernet",
   1357 	  WM_T_82575,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1359 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1360 	  WM_T_82575,		WMP_F_SERDES },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1362 	  "82575GB quad-1000baseT Ethernet",
   1363 	  WM_T_82575,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1365 	  "82575GB quad-1000baseT Ethernet (PM)",
   1366 	  WM_T_82575,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1368 	  "82576 1000BaseT Ethernet",
   1369 	  WM_T_82576,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1371 	  "82576 1000BaseX Ethernet",
   1372 	  WM_T_82576,		WMP_F_FIBER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1375 	  "82576 gigabit Ethernet (SERDES)",
   1376 	  WM_T_82576,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1379 	  "82576 quad-1000BaseT Ethernet",
   1380 	  WM_T_82576,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1383 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1384 	  WM_T_82576,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1387 	  "82576 gigabit Ethernet",
   1388 	  WM_T_82576,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1391 	  "82576 gigabit Ethernet (SERDES)",
   1392 	  WM_T_82576,		WMP_F_SERDES },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1394 	  "82576 quad-gigabit Ethernet (SERDES)",
   1395 	  WM_T_82576,		WMP_F_SERDES },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1398 	  "82580 1000BaseT Ethernet",
   1399 	  WM_T_82580,		WMP_F_COPPER },
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1401 	  "82580 1000BaseX Ethernet",
   1402 	  WM_T_82580,		WMP_F_FIBER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1405 	  "82580 1000BaseT Ethernet (SERDES)",
   1406 	  WM_T_82580,		WMP_F_SERDES },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1409 	  "82580 gigabit Ethernet (SGMII)",
   1410 	  WM_T_82580,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1412 	  "82580 dual-1000BaseT Ethernet",
   1413 	  WM_T_82580,		WMP_F_COPPER },
   1414 
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1416 	  "82580 quad-1000BaseX Ethernet",
   1417 	  WM_T_82580,		WMP_F_FIBER },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1420 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1421 	  WM_T_82580,		WMP_F_COPPER },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1424 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1425 	  WM_T_82580,		WMP_F_SERDES },
   1426 
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1428 	  "DH89XXCC 1000BASE-KX Ethernet",
   1429 	  WM_T_82580,		WMP_F_SERDES },
   1430 
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1432 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1433 	  WM_T_82580,		WMP_F_SERDES },
   1434 
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1436 	  "I350 Gigabit Network Connection",
   1437 	  WM_T_I350,		WMP_F_COPPER },
   1438 
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1440 	  "I350 Gigabit Fiber Network Connection",
   1441 	  WM_T_I350,		WMP_F_FIBER },
   1442 
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1444 	  "I350 Gigabit Backplane Connection",
   1445 	  WM_T_I350,		WMP_F_SERDES },
   1446 
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1448 	  "I350 Quad Port Gigabit Ethernet",
   1449 	  WM_T_I350,		WMP_F_SERDES },
   1450 
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1452 	  "I350 Gigabit Connection",
   1453 	  WM_T_I350,		WMP_F_COPPER },
   1454 
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1456 	  "I354 Gigabit Ethernet (KX)",
   1457 	  WM_T_I354,		WMP_F_SERDES },
   1458 
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1460 	  "I354 Gigabit Ethernet (SGMII)",
   1461 	  WM_T_I354,		WMP_F_COPPER },
   1462 
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1464 	  "I354 Gigabit Ethernet (2.5G)",
   1465 	  WM_T_I354,		WMP_F_COPPER },
   1466 
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1468 	  "I210-T1 Ethernet Server Adapter",
   1469 	  WM_T_I210,		WMP_F_COPPER },
   1470 
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1472 	  "I210 Ethernet (Copper OEM)",
   1473 	  WM_T_I210,		WMP_F_COPPER },
   1474 
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1476 	  "I210 Ethernet (Copper IT)",
   1477 	  WM_T_I210,		WMP_F_COPPER },
   1478 
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1480 	  "I210 Ethernet (Copper, FLASH less)",
   1481 	  WM_T_I210,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1484 	  "I210 Gigabit Ethernet (Fiber)",
   1485 	  WM_T_I210,		WMP_F_FIBER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1488 	  "I210 Gigabit Ethernet (SERDES)",
   1489 	  WM_T_I210,		WMP_F_SERDES },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1492 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1493 	  WM_T_I210,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1496 	  "I210 Gigabit Ethernet (SGMII)",
   1497 	  WM_T_I210,		WMP_F_COPPER },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1500 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1501 	  WM_T_I210,		WMP_F_COPPER },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1504 	  "I211 Ethernet (COPPER)",
   1505 	  WM_T_I211,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1507 	  "I217 V Ethernet Connection",
   1508 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1510 	  "I217 LM Ethernet Connection",
   1511 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1513 	  "I218 V Ethernet Connection",
   1514 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1516 	  "I218 V Ethernet Connection",
   1517 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1519 	  "I218 V Ethernet Connection",
   1520 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1522 	  "I218 LM Ethernet Connection",
   1523 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1525 	  "I218 LM Ethernet Connection",
   1526 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1528 	  "I218 LM Ethernet Connection",
   1529 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1531 	  "I219 LM Ethernet Connection",
   1532 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1534 	  "I219 LM Ethernet Connection",
   1535 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1537 	  "I219 LM Ethernet Connection",
   1538 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1540 	  "I219 LM Ethernet Connection",
   1541 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1543 	  "I219 LM Ethernet Connection",
   1544 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1546 	  "I219 LM Ethernet Connection",
   1547 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1549 	  "I219 LM Ethernet Connection",
   1550 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1552 	  "I219 LM Ethernet Connection",
   1553 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1555 	  "I219 LM Ethernet Connection",
   1556 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1558 	  "I219 V Ethernet Connection",
   1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1561 	  "I219 V Ethernet Connection",
   1562 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1564 	  "I219 V Ethernet Connection",
   1565 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1567 	  "I219 V Ethernet Connection",
   1568 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1570 	  "I219 V Ethernet Connection",
   1571 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1573 	  "I219 V Ethernet Connection",
   1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1576 	  "I219 V Ethernet Connection",
   1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1579 	  "I219 V Ethernet Connection",
   1580 	  WM_T_PCH_CNP,		WMP_F_COPPER },
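         	/* Sentinel entry; wm_lookup() stops at the first NULL name. */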
   1581 	{ 0,			0,
   1582 	  NULL,
   1583 	  0,			0 },
   1584 };
   1585 
   1586 /*
   1587  * Register read/write functions.
   1588  * Other than CSR_{READ|WRITE}().
   1589  */
   1590 
   1591 #if 0 /* Not currently used */
   1592 static inline uint32_t
   1593 wm_io_read(struct wm_softc *sc, int reg)
   1594 {
   1595 
   1596 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1597 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1598 }
   1599 #endif
   1600 
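         /*
          * The I/O BAR is a two-register indirection window: the target
          * register offset is written at BAR offset 0 (IOADDR) and the
          * data is then read or written at BAR offset 4 (IODATA), which
          * is what wm_io_read() above and wm_io_write() below do.
          */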
   1601 static inline void
   1602 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1603 {
   1604 
   1605 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1606 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1607 }
   1608 
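         /*
          * Some 8-bit 82575 controller registers (e.g. those reached
          * through WMREG_SCTL) sit behind a single 32-bit register: the
          * data byte and the sub-register offset are packed into one
          * write, and completion is signalled by SCTL_CTL_READY, polled
          * below in 5us steps up to SCTL_CTL_POLL_TIMEOUT iterations.
          */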
   1609 static inline void
   1610 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1611     uint32_t data)
   1612 {
   1613 	uint32_t regval;
   1614 	int i;
   1615 
   1616 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1617 
   1618 	CSR_WRITE(sc, reg, regval);
   1619 
   1620 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1621 		delay(5);
   1622 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1623 			break;
   1624 	}
   1625 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1626 		aprint_error("%s: WARNING:"
   1627 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1628 		    device_xname(sc->sc_dev), reg);
   1629 	}
   1630 }
   1631 
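         /*
          * Descriptors store DMA addresses as two little-endian 32-bit
          * halves.  On platforms with a 32-bit bus_addr_t the high half
          * is always zero; the sizeof() test below is a compile-time
          * constant, so the dead branch is optimized away.
          */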
   1632 static inline void
   1633 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1634 {
   1635 	wa->wa_low = htole32(v & 0xffffffffU);
   1636 	if (sizeof(bus_addr_t) == 8)
   1637 		wa->wa_high = htole32((uint64_t) v >> 32);
   1638 	else
   1639 		wa->wa_high = 0;
   1640 }
   1641 
   1642 /*
   1643  * Descriptor sync/init functions.
   1644  */
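         /*
          * wm_cdtxsync() copes with ring wrap by splitting the sync in
          * two.  For example (hypothetical numbers): with a 256-entry
          * ring, start = 250 and num = 10, it syncs descriptors 250-255
          * first and then 0-3.
          */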
   1645 static inline void
   1646 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1647 {
   1648 	struct wm_softc *sc = txq->txq_sc;
   1649 
   1650 	/* If it will wrap around, sync to the end of the ring. */
   1651 	if ((start + num) > WM_NTXDESC(txq)) {
   1652 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1653 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1654 		    (WM_NTXDESC(txq) - start), ops);
   1655 		num -= (WM_NTXDESC(txq) - start);
   1656 		start = 0;
   1657 	}
   1658 
   1659 	/* Now sync whatever is left. */
   1660 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1661 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1662 }
   1663 
   1664 static inline void
   1665 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1666 {
   1667 	struct wm_softc *sc = rxq->rxq_sc;
   1668 
   1669 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1670 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1671 }
   1672 
   1673 static inline void
   1674 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1675 {
   1676 	struct wm_softc *sc = rxq->rxq_sc;
   1677 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1678 	struct mbuf *m = rxs->rxs_mbuf;
   1679 
   1680 	/*
   1681 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1682 	 * so that the payload after the Ethernet header is aligned
   1683 	 * to a 4-byte boundary.
   1684 
    1685 	 *
   1686 	 * The stupid chip uses the same size for every buffer, which
   1687 	 * is set in the Receive Control register.  We are using the 2K
   1688 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1689 	 * reason, we can't "scoot" packets longer than the standard
   1690 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1691 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1692 	 * the upper layer copy the headers.
   1693 	 */
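         	/*
         	 * In concrete terms: with the 2-byte tweak the 14-byte
         	 * Ethernet header ends at buffer offset 16, so the IP
         	 * header that follows starts on a 4-byte boundary.
         	 */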
   1694 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1695 
   1696 	if (sc->sc_type == WM_T_82574) {
   1697 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1698 		rxd->erx_data.erxd_addr =
   1699 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1700 		rxd->erx_data.erxd_dd = 0;
   1701 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1702 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1703 
   1704 		rxd->nqrx_data.nrxd_paddr =
   1705 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1706 		/* Currently, split header is not supported. */
   1707 		rxd->nqrx_data.nrxd_haddr = 0;
   1708 	} else {
   1709 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1710 
   1711 		wm_set_dma_addr(&rxd->wrx_addr,
   1712 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1713 		rxd->wrx_len = 0;
   1714 		rxd->wrx_cksum = 0;
   1715 		rxd->wrx_status = 0;
   1716 		rxd->wrx_errors = 0;
   1717 		rxd->wrx_special = 0;
   1718 	}
   1719 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1720 
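         	/*
         	 * Advancing the ring tail pointer (RDT) past this slot
         	 * hands the freshly initialized descriptor back to the
         	 * hardware.
         	 */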
   1721 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1722 }
   1723 
   1724 /*
   1725  * Device driver interface functions and commonly used functions.
   1726  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1727  */
   1728 
   1729 /* Lookup supported device table */
   1730 static const struct wm_product *
   1731 wm_lookup(const struct pci_attach_args *pa)
   1732 {
   1733 	const struct wm_product *wmp;
   1734 
   1735 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1736 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1737 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1738 			return wmp;
   1739 	}
   1740 	return NULL;
   1741 }
   1742 
   1743 /* The match function (ca_match) */
   1744 static int
   1745 wm_match(device_t parent, cfdata_t cf, void *aux)
   1746 {
   1747 	struct pci_attach_args *pa = aux;
   1748 
   1749 	if (wm_lookup(pa) != NULL)
   1750 		return 1;
   1751 
   1752 	return 0;
   1753 }
   1754 
   1755 /* The attach function (ca_attach) */
   1756 static void
   1757 wm_attach(device_t parent, device_t self, void *aux)
   1758 {
   1759 	struct wm_softc *sc = device_private(self);
   1760 	struct pci_attach_args *pa = aux;
   1761 	prop_dictionary_t dict;
   1762 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1763 	pci_chipset_tag_t pc = pa->pa_pc;
   1764 	int counts[PCI_INTR_TYPE_SIZE];
   1765 	pci_intr_type_t max_type;
   1766 	const char *eetype, *xname;
   1767 	bus_space_tag_t memt;
   1768 	bus_space_handle_t memh;
   1769 	bus_size_t memsize;
   1770 	int memh_valid;
   1771 	int i, error;
   1772 	const struct wm_product *wmp;
   1773 	prop_data_t ea;
   1774 	prop_number_t pn;
   1775 	uint8_t enaddr[ETHER_ADDR_LEN];
   1776 	char buf[256];
   1777 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1778 	pcireg_t preg, memtype;
   1779 	uint16_t eeprom_data, apme_mask;
   1780 	bool force_clear_smbi;
   1781 	uint32_t link_mode;
   1782 	uint32_t reg;
   1783 
   1784 	sc->sc_dev = self;
   1785 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1786 	sc->sc_core_stopping = false;
   1787 
   1788 	wmp = wm_lookup(pa);
   1789 #ifdef DIAGNOSTIC
   1790 	if (wmp == NULL) {
   1791 		printf("\n");
   1792 		panic("wm_attach: impossible");
   1793 	}
   1794 #endif
   1795 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1796 
   1797 	sc->sc_pc = pa->pa_pc;
   1798 	sc->sc_pcitag = pa->pa_tag;
   1799 
   1800 	if (pci_dma64_available(pa))
   1801 		sc->sc_dmat = pa->pa_dmat64;
   1802 	else
   1803 		sc->sc_dmat = pa->pa_dmat;
   1804 
   1805 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1806 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1807 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1808 
   1809 	sc->sc_type = wmp->wmp_type;
   1810 
   1811 	/* Set default function pointers */
   1812 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1813 	sc->phy.release = sc->nvm.release = wm_put_null;
   1814 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1815 
   1816 	if (sc->sc_type < WM_T_82543) {
   1817 		if (sc->sc_rev < 2) {
   1818 			aprint_error_dev(sc->sc_dev,
   1819 			    "i82542 must be at least rev. 2\n");
   1820 			return;
   1821 		}
   1822 		if (sc->sc_rev < 3)
   1823 			sc->sc_type = WM_T_82542_2_0;
   1824 	}
   1825 
   1826 	/*
   1827 	 * Disable MSI for Errata:
   1828 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1829 	 *
   1830 	 *  82544: Errata 25
   1831 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1832 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1833 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1834 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1835 	 *
   1836 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1837 	 *
   1838 	 *  82571 & 82572: Errata 63
   1839 	 */
   1840 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1841 	    || (sc->sc_type == WM_T_82572))
   1842 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1843 
   1844 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1845 	    || (sc->sc_type == WM_T_82580)
   1846 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1847 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1848 		sc->sc_flags |= WM_F_NEWQUEUE;
   1849 
   1850 	/* Set device properties (mactype) */
   1851 	dict = device_properties(sc->sc_dev);
   1852 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1853 
   1854 	/*
    1855 	 * Map the device.  All devices support memory-mapped access,
   1856 	 * and it is really required for normal operation.
   1857 	 */
   1858 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1859 	switch (memtype) {
   1860 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1861 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1862 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1863 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1864 		break;
   1865 	default:
   1866 		memh_valid = 0;
   1867 		break;
   1868 	}
   1869 
   1870 	if (memh_valid) {
   1871 		sc->sc_st = memt;
   1872 		sc->sc_sh = memh;
   1873 		sc->sc_ss = memsize;
   1874 	} else {
   1875 		aprint_error_dev(sc->sc_dev,
   1876 		    "unable to map device registers\n");
   1877 		return;
   1878 	}
   1879 
   1880 	/*
   1881 	 * In addition, i82544 and later support I/O mapped indirect
   1882 	 * register access.  It is not desirable (nor supported in
   1883 	 * this driver) to use it for normal operation, though it is
   1884 	 * required to work around bugs in some chip versions.
   1885 	 */
   1886 	if (sc->sc_type >= WM_T_82544) {
   1887 		/* First we have to find the I/O BAR. */
   1888 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1889 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1890 			if (memtype == PCI_MAPREG_TYPE_IO)
   1891 				break;
   1892 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1893 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1894 				i += 4;	/* skip high bits, too */
   1895 		}
   1896 		if (i < PCI_MAPREG_END) {
   1897 			/*
    1898 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1899 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1900 			 * That's no problem, because those newer chips
    1901 			 * don't have this bug.
    1902 			 *
    1903 			 * The i8254x apparently doesn't respond when the
    1904 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1905 			 * been configured.
   1906 			 */
   1907 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1908 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1909 				aprint_error_dev(sc->sc_dev,
   1910 				    "WARNING: I/O BAR at zero.\n");
   1911 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1912 					0, &sc->sc_iot, &sc->sc_ioh,
   1913 					NULL, &sc->sc_ios) == 0) {
   1914 				sc->sc_flags |= WM_F_IOH_VALID;
   1915 			} else
   1916 				aprint_error_dev(sc->sc_dev,
   1917 				    "WARNING: unable to map I/O space\n");
   1918 		}
   1919 
   1920 	}
   1921 
   1922 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1923 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1924 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1925 	if (sc->sc_type < WM_T_82542_2_1)
   1926 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1927 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1928 
   1929 	/* Power up chip */
   1930 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1931 	    && error != EOPNOTSUPP) {
   1932 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1933 		return;
   1934 	}
   1935 
   1936 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1937 	/*
    1938 	 * To save interrupt resources, don't use MSI-X if we can use
    1939 	 * only one queue.
   1940 	 */
   1941 	if (sc->sc_nqueues > 1) {
   1942 		max_type = PCI_INTR_TYPE_MSIX;
   1943 		/*
    1944 		 * The 82583 has an MSI-X capability in the PCI configuration
    1945 		 * space but doesn't actually support it. At least the
    1946 		 * documentation doesn't say anything about MSI-X.
   1947 		 */
   1948 		counts[PCI_INTR_TYPE_MSIX]
   1949 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1950 	} else {
   1951 		max_type = PCI_INTR_TYPE_MSI;
   1952 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1953 	}
   1954 
   1955 	/* Allocation settings */
   1956 	counts[PCI_INTR_TYPE_MSI] = 1;
   1957 	counts[PCI_INTR_TYPE_INTX] = 1;
   1958 	/* overridden by disable flags */
   1959 	if (wm_disable_msi != 0) {
   1960 		counts[PCI_INTR_TYPE_MSI] = 0;
   1961 		if (wm_disable_msix != 0) {
   1962 			max_type = PCI_INTR_TYPE_INTX;
   1963 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1964 		}
   1965 	} else if (wm_disable_msix != 0) {
   1966 		max_type = PCI_INTR_TYPE_MSI;
   1967 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1968 	}
   1969 
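         	/*
         	 * Interrupt allocation falls back down the chain on
         	 * failure: MSI-X -> MSI -> INTx.  Each failure path below
         	 * releases the vectors pci_intr_alloc() returned, lowers
         	 * max_type and jumps back here.
         	 */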
   1970 alloc_retry:
   1971 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1972 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1973 		return;
   1974 	}
   1975 
   1976 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1977 		error = wm_setup_msix(sc);
   1978 		if (error) {
   1979 			pci_intr_release(pc, sc->sc_intrs,
   1980 			    counts[PCI_INTR_TYPE_MSIX]);
   1981 
   1982 			/* Setup for MSI: Disable MSI-X */
   1983 			max_type = PCI_INTR_TYPE_MSI;
   1984 			counts[PCI_INTR_TYPE_MSI] = 1;
   1985 			counts[PCI_INTR_TYPE_INTX] = 1;
   1986 			goto alloc_retry;
   1987 		}
   1988 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1989 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   1990 		error = wm_setup_legacy(sc);
   1991 		if (error) {
   1992 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1993 			    counts[PCI_INTR_TYPE_MSI]);
   1994 
   1995 			/* The next try is for INTx: Disable MSI */
   1996 			max_type = PCI_INTR_TYPE_INTX;
   1997 			counts[PCI_INTR_TYPE_INTX] = 1;
   1998 			goto alloc_retry;
   1999 		}
   2000 	} else {
   2001 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2002 		error = wm_setup_legacy(sc);
   2003 		if (error) {
   2004 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2005 			    counts[PCI_INTR_TYPE_INTX]);
   2006 			return;
   2007 		}
   2008 	}
   2009 
   2010 	/*
   2011 	 * Check the function ID (unit number of the chip).
   2012 	 */
   2013 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2014 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2015 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2016 	    || (sc->sc_type == WM_T_82580)
   2017 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2018 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2019 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2020 	else
   2021 		sc->sc_funcid = 0;
   2022 
   2023 	/*
   2024 	 * Determine a few things about the bus we're connected to.
   2025 	 */
   2026 	if (sc->sc_type < WM_T_82543) {
   2027 		/* We don't really know the bus characteristics here. */
   2028 		sc->sc_bus_speed = 33;
   2029 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2030 		/*
    2031 		 * CSA (Communication Streaming Architecture) is about as
    2032 		 * fast as a 32-bit 66MHz PCI bus.
   2033 		 */
   2034 		sc->sc_flags |= WM_F_CSA;
   2035 		sc->sc_bus_speed = 66;
   2036 		aprint_verbose_dev(sc->sc_dev,
   2037 		    "Communication Streaming Architecture\n");
   2038 		if (sc->sc_type == WM_T_82547) {
   2039 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2040 			callout_setfunc(&sc->sc_txfifo_ch,
   2041 			    wm_82547_txfifo_stall, sc);
   2042 			aprint_verbose_dev(sc->sc_dev,
   2043 			    "using 82547 Tx FIFO stall work-around\n");
   2044 		}
   2045 	} else if (sc->sc_type >= WM_T_82571) {
   2046 		sc->sc_flags |= WM_F_PCIE;
   2047 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2048 		    && (sc->sc_type != WM_T_ICH10)
   2049 		    && (sc->sc_type != WM_T_PCH)
   2050 		    && (sc->sc_type != WM_T_PCH2)
   2051 		    && (sc->sc_type != WM_T_PCH_LPT)
   2052 		    && (sc->sc_type != WM_T_PCH_SPT)
   2053 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2054 			/* ICH* and PCH* have no PCIe capability registers */
   2055 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2056 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2057 				NULL) == 0)
   2058 				aprint_error_dev(sc->sc_dev,
   2059 				    "unable to find PCIe capability\n");
   2060 		}
   2061 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2062 	} else {
   2063 		reg = CSR_READ(sc, WMREG_STATUS);
   2064 		if (reg & STATUS_BUS64)
   2065 			sc->sc_flags |= WM_F_BUS64;
   2066 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2067 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2068 
   2069 			sc->sc_flags |= WM_F_PCIX;
   2070 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2071 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2072 				aprint_error_dev(sc->sc_dev,
   2073 				    "unable to find PCIX capability\n");
   2074 			else if (sc->sc_type != WM_T_82545_3 &&
   2075 				 sc->sc_type != WM_T_82546_3) {
   2076 				/*
   2077 				 * Work around a problem caused by the BIOS
   2078 				 * setting the max memory read byte count
   2079 				 * incorrectly.
   2080 				 */
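         				/*
         				 * Both fields encode 512 << n bytes
         				 * (512/1024/2048/4096); if the
         				 * command value exceeds the
         				 * advertised maximum, clamp it down.
         				 */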
   2081 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2082 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2083 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2084 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2085 
   2086 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2087 				    PCIX_CMD_BYTECNT_SHIFT;
   2088 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2089 				    PCIX_STATUS_MAXB_SHIFT;
   2090 				if (bytecnt > maxb) {
   2091 					aprint_verbose_dev(sc->sc_dev,
   2092 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2093 					    512 << bytecnt, 512 << maxb);
   2094 					pcix_cmd = (pcix_cmd &
   2095 					    ~PCIX_CMD_BYTECNT_MASK) |
   2096 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2097 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2098 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2099 					    pcix_cmd);
   2100 				}
   2101 			}
   2102 		}
   2103 		/*
   2104 		 * The quad port adapter is special; it has a PCIX-PCIX
   2105 		 * bridge on the board, and can run the secondary bus at
   2106 		 * a higher speed.
   2107 		 */
   2108 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2109 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2110 								      : 66;
   2111 		} else if (sc->sc_flags & WM_F_PCIX) {
   2112 			switch (reg & STATUS_PCIXSPD_MASK) {
   2113 			case STATUS_PCIXSPD_50_66:
   2114 				sc->sc_bus_speed = 66;
   2115 				break;
   2116 			case STATUS_PCIXSPD_66_100:
   2117 				sc->sc_bus_speed = 100;
   2118 				break;
   2119 			case STATUS_PCIXSPD_100_133:
   2120 				sc->sc_bus_speed = 133;
   2121 				break;
   2122 			default:
   2123 				aprint_error_dev(sc->sc_dev,
   2124 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2125 				    reg & STATUS_PCIXSPD_MASK);
   2126 				sc->sc_bus_speed = 66;
   2127 				break;
   2128 			}
   2129 		} else
   2130 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2131 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2132 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2133 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2134 	}
   2135 
   2136 	/* clear interesting stat counters */
   2137 	CSR_READ(sc, WMREG_COLC);
   2138 	CSR_READ(sc, WMREG_RXERRC);
   2139 
   2140 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2141 	    || (sc->sc_type >= WM_T_ICH8))
   2142 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2143 	if (sc->sc_type >= WM_T_ICH8)
   2144 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2145 
   2146 	/* Set PHY, NVM mutex related stuff */
   2147 	switch (sc->sc_type) {
   2148 	case WM_T_82542_2_0:
   2149 	case WM_T_82542_2_1:
   2150 	case WM_T_82543:
   2151 	case WM_T_82544:
   2152 		/* Microwire */
   2153 		sc->nvm.read = wm_nvm_read_uwire;
   2154 		sc->sc_nvm_wordsize = 64;
   2155 		sc->sc_nvm_addrbits = 6;
   2156 		break;
   2157 	case WM_T_82540:
   2158 	case WM_T_82545:
   2159 	case WM_T_82545_3:
   2160 	case WM_T_82546:
   2161 	case WM_T_82546_3:
   2162 		/* Microwire */
   2163 		sc->nvm.read = wm_nvm_read_uwire;
   2164 		reg = CSR_READ(sc, WMREG_EECD);
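         		/*
         		 * The EE_SIZE strap in EECD distinguishes 256-word
         		 * (8 address bit) Microwire EEPROMs from 64-word
         		 * (6 address bit) ones.
         		 */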
   2165 		if (reg & EECD_EE_SIZE) {
   2166 			sc->sc_nvm_wordsize = 256;
   2167 			sc->sc_nvm_addrbits = 8;
   2168 		} else {
   2169 			sc->sc_nvm_wordsize = 64;
   2170 			sc->sc_nvm_addrbits = 6;
   2171 		}
   2172 		sc->sc_flags |= WM_F_LOCK_EECD;
   2173 		sc->nvm.acquire = wm_get_eecd;
   2174 		sc->nvm.release = wm_put_eecd;
   2175 		break;
   2176 	case WM_T_82541:
   2177 	case WM_T_82541_2:
   2178 	case WM_T_82547:
   2179 	case WM_T_82547_2:
   2180 		reg = CSR_READ(sc, WMREG_EECD);
   2181 		/*
    2182 		 * wm_nvm_set_addrbits_size_eecd() itself accesses the SPI
    2183 		 * only on the 8254[17]; set flags and functions before calling it.
   2184 		 */
   2185 		sc->sc_flags |= WM_F_LOCK_EECD;
   2186 		sc->nvm.acquire = wm_get_eecd;
   2187 		sc->nvm.release = wm_put_eecd;
   2188 		if (reg & EECD_EE_TYPE) {
   2189 			/* SPI */
   2190 			sc->nvm.read = wm_nvm_read_spi;
   2191 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2192 			wm_nvm_set_addrbits_size_eecd(sc);
   2193 		} else {
   2194 			/* Microwire */
   2195 			sc->nvm.read = wm_nvm_read_uwire;
   2196 			if ((reg & EECD_EE_ABITS) != 0) {
   2197 				sc->sc_nvm_wordsize = 256;
   2198 				sc->sc_nvm_addrbits = 8;
   2199 			} else {
   2200 				sc->sc_nvm_wordsize = 64;
   2201 				sc->sc_nvm_addrbits = 6;
   2202 			}
   2203 		}
   2204 		break;
   2205 	case WM_T_82571:
   2206 	case WM_T_82572:
   2207 		/* SPI */
   2208 		sc->nvm.read = wm_nvm_read_eerd;
    2209 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2210 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2211 		wm_nvm_set_addrbits_size_eecd(sc);
   2212 		sc->phy.acquire = wm_get_swsm_semaphore;
   2213 		sc->phy.release = wm_put_swsm_semaphore;
   2214 		sc->nvm.acquire = wm_get_nvm_82571;
   2215 		sc->nvm.release = wm_put_nvm_82571;
   2216 		break;
   2217 	case WM_T_82573:
   2218 	case WM_T_82574:
   2219 	case WM_T_82583:
   2220 		sc->nvm.read = wm_nvm_read_eerd;
    2221 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2222 		if (sc->sc_type == WM_T_82573) {
   2223 			sc->phy.acquire = wm_get_swsm_semaphore;
   2224 			sc->phy.release = wm_put_swsm_semaphore;
   2225 			sc->nvm.acquire = wm_get_nvm_82571;
   2226 			sc->nvm.release = wm_put_nvm_82571;
   2227 		} else {
   2228 			/* Both PHY and NVM use the same semaphore. */
   2229 			sc->phy.acquire = sc->nvm.acquire
   2230 			    = wm_get_swfwhw_semaphore;
   2231 			sc->phy.release = sc->nvm.release
   2232 			    = wm_put_swfwhw_semaphore;
   2233 		}
   2234 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2235 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2236 			sc->sc_nvm_wordsize = 2048;
   2237 		} else {
   2238 			/* SPI */
   2239 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2240 			wm_nvm_set_addrbits_size_eecd(sc);
   2241 		}
   2242 		break;
   2243 	case WM_T_82575:
   2244 	case WM_T_82576:
   2245 	case WM_T_82580:
   2246 	case WM_T_I350:
   2247 	case WM_T_I354:
   2248 	case WM_T_80003:
   2249 		/* SPI */
   2250 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2251 		wm_nvm_set_addrbits_size_eecd(sc);
   2252 		if ((sc->sc_type == WM_T_80003)
   2253 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2254 			sc->nvm.read = wm_nvm_read_eerd;
   2255 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2256 		} else {
   2257 			sc->nvm.read = wm_nvm_read_spi;
   2258 			sc->sc_flags |= WM_F_LOCK_EECD;
   2259 		}
   2260 		sc->phy.acquire = wm_get_phy_82575;
   2261 		sc->phy.release = wm_put_phy_82575;
   2262 		sc->nvm.acquire = wm_get_nvm_80003;
   2263 		sc->nvm.release = wm_put_nvm_80003;
   2264 		break;
   2265 	case WM_T_ICH8:
   2266 	case WM_T_ICH9:
   2267 	case WM_T_ICH10:
   2268 	case WM_T_PCH:
   2269 	case WM_T_PCH2:
   2270 	case WM_T_PCH_LPT:
   2271 		sc->nvm.read = wm_nvm_read_ich8;
   2272 		/* FLASH */
   2273 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2274 		sc->sc_nvm_wordsize = 2048;
   2275 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2276 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2277 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2278 			aprint_error_dev(sc->sc_dev,
   2279 			    "can't map FLASH registers\n");
   2280 			goto out;
   2281 		}
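         		/*
         		 * GFPREG describes the flash region in sector units:
         		 * the low field holds the first sector, bits 31:16
         		 * the last.  The computation below turns
         		 * (last - first + 1) sectors into bytes, splits the
         		 * region into its two banks and converts bytes into
         		 * 16-bit NVM words.
         		 */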
   2282 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2283 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2284 		    ICH_FLASH_SECTOR_SIZE;
   2285 		sc->sc_ich8_flash_bank_size =
   2286 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2287 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2288 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2289 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2290 		sc->sc_flashreg_offset = 0;
   2291 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2292 		sc->phy.release = wm_put_swflag_ich8lan;
   2293 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2294 		sc->nvm.release = wm_put_nvm_ich8lan;
   2295 		break;
   2296 	case WM_T_PCH_SPT:
   2297 	case WM_T_PCH_CNP:
   2298 		sc->nvm.read = wm_nvm_read_spt;
   2299 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2300 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2301 		sc->sc_flasht = sc->sc_st;
   2302 		sc->sc_flashh = sc->sc_sh;
   2303 		sc->sc_ich8_flash_base = 0;
   2304 		sc->sc_nvm_wordsize =
   2305 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2306 		    * NVM_SIZE_MULTIPLIER;
    2307 		/* That's the size in bytes; we want words */
   2308 		sc->sc_nvm_wordsize /= 2;
   2309 		/* Assume 2 banks */
   2310 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2311 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2312 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2313 		sc->phy.release = wm_put_swflag_ich8lan;
   2314 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2315 		sc->nvm.release = wm_put_nvm_ich8lan;
   2316 		break;
   2317 	case WM_T_I210:
   2318 	case WM_T_I211:
    2319 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2320 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2321 		if (wm_nvm_flash_presence_i210(sc)) {
   2322 			sc->nvm.read = wm_nvm_read_eerd;
   2323 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2324 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2325 			wm_nvm_set_addrbits_size_eecd(sc);
   2326 		} else {
   2327 			sc->nvm.read = wm_nvm_read_invm;
   2328 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2329 			sc->sc_nvm_wordsize = INVM_SIZE;
   2330 		}
   2331 		sc->phy.acquire = wm_get_phy_82575;
   2332 		sc->phy.release = wm_put_phy_82575;
   2333 		sc->nvm.acquire = wm_get_nvm_80003;
   2334 		sc->nvm.release = wm_put_nvm_80003;
   2335 		break;
   2336 	default:
   2337 		break;
   2338 	}
   2339 
   2340 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
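         	/*
         	 * On the 82571/82572 the SWSM2_LOCK bit arbitrates this:
         	 * only the function that sets the lock first clears SMBI,
         	 * so the clear is done at most once while the lock stays
         	 * set.
         	 */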
   2341 	switch (sc->sc_type) {
   2342 	case WM_T_82571:
   2343 	case WM_T_82572:
   2344 		reg = CSR_READ(sc, WMREG_SWSM2);
   2345 		if ((reg & SWSM2_LOCK) == 0) {
   2346 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2347 			force_clear_smbi = true;
   2348 		} else
   2349 			force_clear_smbi = false;
   2350 		break;
   2351 	case WM_T_82573:
   2352 	case WM_T_82574:
   2353 	case WM_T_82583:
   2354 		force_clear_smbi = true;
   2355 		break;
   2356 	default:
   2357 		force_clear_smbi = false;
   2358 		break;
   2359 	}
   2360 	if (force_clear_smbi) {
   2361 		reg = CSR_READ(sc, WMREG_SWSM);
   2362 		if ((reg & SWSM_SMBI) != 0)
   2363 			aprint_error_dev(sc->sc_dev,
   2364 			    "Please update the Bootagent\n");
   2365 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2366 	}
   2367 
   2368 	/*
   2369 	 * Defer printing the EEPROM type until after verifying the checksum
    2370 	 * Defer printing the EEPROM type until after verifying the checksum.
   2371 	 * that no EEPROM is attached.
   2372 	 */
   2373 	/*
   2374 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2375 	 * this for later, so we can fail future reads from the EEPROM.
   2376 	 */
   2377 	if (wm_nvm_validate_checksum(sc)) {
   2378 		/*
    2379 		 * Check again, because some PCI-e parts fail the first
    2380 		 * attempt due to the link being in a sleep state.
   2381 		 */
   2382 		if (wm_nvm_validate_checksum(sc))
   2383 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2384 	}
   2385 
   2386 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2387 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2388 	else {
   2389 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2390 		    sc->sc_nvm_wordsize);
   2391 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2392 			aprint_verbose("iNVM");
   2393 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2394 			aprint_verbose("FLASH(HW)");
   2395 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2396 			aprint_verbose("FLASH");
   2397 		else {
   2398 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2399 				eetype = "SPI";
   2400 			else
   2401 				eetype = "MicroWire";
   2402 			aprint_verbose("(%d address bits) %s EEPROM",
   2403 			    sc->sc_nvm_addrbits, eetype);
   2404 		}
   2405 	}
   2406 	wm_nvm_version(sc);
   2407 	aprint_verbose("\n");
   2408 
   2409 	/*
    2410 	 * XXX This is the first call of wm_gmii_setup_phytype().
    2411 	 * The result might be incorrect.
   2412 	 */
   2413 	wm_gmii_setup_phytype(sc, 0, 0);
   2414 
   2415 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2416 	switch (sc->sc_type) {
   2417 	case WM_T_ICH8:
   2418 	case WM_T_ICH9:
   2419 	case WM_T_ICH10:
   2420 	case WM_T_PCH:
   2421 	case WM_T_PCH2:
   2422 	case WM_T_PCH_LPT:
   2423 	case WM_T_PCH_SPT:
   2424 	case WM_T_PCH_CNP:
   2425 		apme_mask = WUC_APME;
   2426 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2427 		if ((eeprom_data & apme_mask) != 0)
   2428 			sc->sc_flags |= WM_F_WOL;
   2429 		break;
   2430 	default:
   2431 		break;
   2432 	}
   2433 
   2434 	/* Reset the chip to a known state. */
   2435 	wm_reset(sc);
   2436 
   2437 	/*
   2438 	 * Check for I21[01] PLL workaround.
   2439 	 *
   2440 	 * Three cases:
   2441 	 * a) Chip is I211.
   2442 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2443 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2444 	 */
   2445 	if (sc->sc_type == WM_T_I211)
   2446 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2447 	if (sc->sc_type == WM_T_I210) {
   2448 		if (!wm_nvm_flash_presence_i210(sc))
   2449 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2450 		else if ((sc->sc_nvm_ver_major < 3)
   2451 		    || ((sc->sc_nvm_ver_major == 3)
   2452 			&& (sc->sc_nvm_ver_minor < 25))) {
   2453 			aprint_verbose_dev(sc->sc_dev,
   2454 			    "ROM image version %d.%d is older than 3.25\n",
   2455 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2456 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2457 		}
   2458 	}
   2459 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2460 		wm_pll_workaround_i210(sc);
   2461 
   2462 	wm_get_wakeup(sc);
   2463 
   2464 	/* Non-AMT based hardware can now take control from firmware */
   2465 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2466 		wm_get_hw_control(sc);
   2467 
   2468 	/*
   2469 	 * Read the Ethernet address from the EEPROM, if not first found
   2470 	 * in device properties.
   2471 	 */
   2472 	ea = prop_dictionary_get(dict, "mac-address");
   2473 	if (ea != NULL) {
   2474 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2475 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2476 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2477 	} else {
   2478 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2479 			aprint_error_dev(sc->sc_dev,
   2480 			    "unable to read Ethernet address\n");
   2481 			goto out;
   2482 		}
   2483 	}
   2484 
   2485 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2486 	    ether_sprintf(enaddr));
   2487 
   2488 	/*
   2489 	 * Read the config info from the EEPROM, and set up various
   2490 	 * bits in the control registers based on their contents.
   2491 	 */
   2492 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2493 	if (pn != NULL) {
   2494 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2495 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2496 	} else {
   2497 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2498 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2499 			goto out;
   2500 		}
   2501 	}
   2502 
   2503 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2504 	if (pn != NULL) {
   2505 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2506 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2507 	} else {
   2508 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2509 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2510 			goto out;
   2511 		}
   2512 	}
   2513 
   2514 	/* check for WM_F_WOL */
   2515 	switch (sc->sc_type) {
   2516 	case WM_T_82542_2_0:
   2517 	case WM_T_82542_2_1:
   2518 	case WM_T_82543:
   2519 		/* dummy? */
   2520 		eeprom_data = 0;
   2521 		apme_mask = NVM_CFG3_APME;
   2522 		break;
   2523 	case WM_T_82544:
   2524 		apme_mask = NVM_CFG2_82544_APM_EN;
   2525 		eeprom_data = cfg2;
   2526 		break;
   2527 	case WM_T_82546:
   2528 	case WM_T_82546_3:
   2529 	case WM_T_82571:
   2530 	case WM_T_82572:
   2531 	case WM_T_82573:
   2532 	case WM_T_82574:
   2533 	case WM_T_82583:
   2534 	case WM_T_80003:
   2535 	case WM_T_82575:
   2536 	case WM_T_82576:
   2537 		apme_mask = NVM_CFG3_APME;
   2538 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2539 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2540 		break;
   2541 	case WM_T_82580:
   2542 	case WM_T_I350:
   2543 	case WM_T_I354:
   2544 	case WM_T_I210:
   2545 	case WM_T_I211:
   2546 		apme_mask = NVM_CFG3_APME;
   2547 		wm_nvm_read(sc,
   2548 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2549 		    1, &eeprom_data);
   2550 		break;
   2551 	case WM_T_ICH8:
   2552 	case WM_T_ICH9:
   2553 	case WM_T_ICH10:
   2554 	case WM_T_PCH:
   2555 	case WM_T_PCH2:
   2556 	case WM_T_PCH_LPT:
   2557 	case WM_T_PCH_SPT:
   2558 	case WM_T_PCH_CNP:
    2559 		/* Already checked before wm_reset() */
   2560 		apme_mask = eeprom_data = 0;
   2561 		break;
   2562 	default: /* XXX 82540 */
   2563 		apme_mask = NVM_CFG3_APME;
   2564 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2565 		break;
   2566 	}
    2567 	/* Now that the EEPROM data is read, check whether to set WM_F_WOL */
   2568 	if ((eeprom_data & apme_mask) != 0)
   2569 		sc->sc_flags |= WM_F_WOL;
   2570 
   2571 	/*
    2572 	 * We have the EEPROM settings; now apply the special cases
    2573 	 * where the EEPROM may be wrong or the board won't support
    2574 	 * wake-on-LAN on a particular port.
   2575 	 */
   2576 	switch (sc->sc_pcidevid) {
   2577 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2578 		sc->sc_flags &= ~WM_F_WOL;
   2579 		break;
   2580 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2581 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2582 		/* Wake events are only supported on port A of dual-fiber
    2583 		 * adapters, regardless of the EEPROM setting */
   2584 		if (sc->sc_funcid == 1)
   2585 			sc->sc_flags &= ~WM_F_WOL;
   2586 		break;
   2587 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2588 		/* If quad port adapter, disable WoL on all but port A */
   2589 		if (sc->sc_funcid != 0)
   2590 			sc->sc_flags &= ~WM_F_WOL;
   2591 		break;
   2592 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2593 		/* Wake events are only supported on port A of dual-fiber
    2594 		 * adapters, regardless of the EEPROM setting */
   2595 		if (sc->sc_funcid == 1)
   2596 			sc->sc_flags &= ~WM_F_WOL;
   2597 		break;
   2598 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2599 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2600 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2601 		/* If quad port adapter, disable WoL on all but port A */
   2602 		if (sc->sc_funcid != 0)
   2603 			sc->sc_flags &= ~WM_F_WOL;
   2604 		break;
   2605 	}
   2606 
   2607 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2608 		/* Check NVM for autonegotiation */
   2609 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2610 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2611 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2612 		}
   2613 	}
   2614 
   2615 	/*
    2616 	 * XXX need special handling for some multiple-port cards
    2617 	 * to disable a particular port.
   2618 	 */
   2619 
   2620 	if (sc->sc_type >= WM_T_82544) {
   2621 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2622 		if (pn != NULL) {
   2623 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2624 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2625 		} else {
   2626 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2627 				aprint_error_dev(sc->sc_dev,
   2628 				    "unable to read SWDPIN\n");
   2629 				goto out;
   2630 			}
   2631 		}
   2632 	}
   2633 
   2634 	if (cfg1 & NVM_CFG1_ILOS)
   2635 		sc->sc_ctrl |= CTRL_ILOS;
   2636 
   2637 	/*
   2638 	 * XXX
    2639 	 * This code isn't correct because pins 2 and 3 are located
    2640 	 * at different positions on newer chips. Check all datasheets.
    2641 	 *
    2642 	 * Until this is resolved, only handle chips up to the 82580.
   2643 	 */
   2644 	if (sc->sc_type <= WM_T_82580) {
   2645 		if (sc->sc_type >= WM_T_82544) {
   2646 			sc->sc_ctrl |=
   2647 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2648 			    CTRL_SWDPIO_SHIFT;
   2649 			sc->sc_ctrl |=
   2650 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2651 			    CTRL_SWDPINS_SHIFT;
   2652 		} else {
   2653 			sc->sc_ctrl |=
   2654 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2655 			    CTRL_SWDPIO_SHIFT;
   2656 		}
   2657 	}
   2658 
    2659 	/* XXX Should this also be done for chips other than the 82580? */
   2660 	if (sc->sc_type == WM_T_82580) {
   2661 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2662 		if (nvmword & __BIT(13))
   2663 			sc->sc_ctrl |= CTRL_ILOS;
   2664 	}
   2665 
   2666 #if 0
   2667 	if (sc->sc_type >= WM_T_82544) {
   2668 		if (cfg1 & NVM_CFG1_IPS0)
   2669 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2670 		if (cfg1 & NVM_CFG1_IPS1)
   2671 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2672 		sc->sc_ctrl_ext |=
   2673 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2674 		    CTRL_EXT_SWDPIO_SHIFT;
   2675 		sc->sc_ctrl_ext |=
   2676 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2677 		    CTRL_EXT_SWDPINS_SHIFT;
   2678 	} else {
   2679 		sc->sc_ctrl_ext |=
   2680 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2681 		    CTRL_EXT_SWDPIO_SHIFT;
   2682 	}
   2683 #endif
   2684 
   2685 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2686 #if 0
   2687 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2688 #endif
   2689 
   2690 	if (sc->sc_type == WM_T_PCH) {
   2691 		uint16_t val;
   2692 
   2693 		/* Save the NVM K1 bit setting */
   2694 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2695 
   2696 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2697 			sc->sc_nvm_k1_enabled = 1;
   2698 		else
   2699 			sc->sc_nvm_k1_enabled = 0;
   2700 	}
   2701 
    2702 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2703 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2704 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2705 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2706 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2707 	    || sc->sc_type == WM_T_82573
   2708 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2709 		/* Copper only */
   2710 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2711 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2712 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2713 	    || (sc->sc_type == WM_T_I211)) {
   2714 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2715 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2716 		switch (link_mode) {
   2717 		case CTRL_EXT_LINK_MODE_1000KX:
   2718 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2719 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2720 			break;
   2721 		case CTRL_EXT_LINK_MODE_SGMII:
   2722 			if (wm_sgmii_uses_mdio(sc)) {
   2723 				aprint_verbose_dev(sc->sc_dev,
   2724 				    "SGMII(MDIO)\n");
   2725 				sc->sc_flags |= WM_F_SGMII;
   2726 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2727 				break;
   2728 			}
   2729 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2730 			/*FALLTHROUGH*/
   2731 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2732 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2733 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2734 				if (link_mode
   2735 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2736 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2737 					sc->sc_flags |= WM_F_SGMII;
   2738 				} else {
   2739 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2740 					aprint_verbose_dev(sc->sc_dev,
   2741 					    "SERDES\n");
   2742 				}
   2743 				break;
   2744 			}
   2745 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2746 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2747 
   2748 			/* Change current link mode setting */
   2749 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2750 			switch (sc->sc_mediatype) {
   2751 			case WM_MEDIATYPE_COPPER:
   2752 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2753 				break;
   2754 			case WM_MEDIATYPE_SERDES:
   2755 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2756 				break;
   2757 			default:
   2758 				break;
   2759 			}
   2760 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2761 			break;
   2762 		case CTRL_EXT_LINK_MODE_GMII:
   2763 		default:
   2764 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2765 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2766 			break;
   2767 		}
   2768 
    2769 		reg &= ~CTRL_EXT_I2C_ENA;
    2770 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2771 			reg |= CTRL_EXT_I2C_ENA;
   2774 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2775 	} else if (sc->sc_type < WM_T_82543 ||
   2776 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2777 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2778 			aprint_error_dev(sc->sc_dev,
   2779 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2780 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2781 		}
   2782 	} else {
   2783 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2784 			aprint_error_dev(sc->sc_dev,
   2785 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2786 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2787 		}
   2788 	}
   2789 
   2790 	if (sc->sc_type >= WM_T_PCH2)
   2791 		sc->sc_flags |= WM_F_EEE;
   2792 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2793 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2794 		/* XXX: Need special handling for I354. (not yet) */
   2795 		if (sc->sc_type != WM_T_I354)
   2796 			sc->sc_flags |= WM_F_EEE;
   2797 	}
   2798 
   2799 	/* Set device properties (macflags) */
   2800 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2801 
   2802 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2803 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2804 
   2805 	/* Initialize the media structures accordingly. */
   2806 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2807 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2808 	else
   2809 		wm_tbi_mediainit(sc); /* All others */
   2810 
   2811 	ifp = &sc->sc_ethercom.ec_if;
   2812 	xname = device_xname(sc->sc_dev);
   2813 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2814 	ifp->if_softc = sc;
   2815 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2816 #ifdef WM_MPSAFE
   2817 	ifp->if_extflags = IFEF_MPSAFE;
   2818 #endif
   2819 	ifp->if_ioctl = wm_ioctl;
   2820 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2821 		ifp->if_start = wm_nq_start;
   2822 		/*
   2823 		 * When the number of CPUs is one and the controller can use
    2824 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2825 		 * That is, wm(4) uses two interrupts: one for Tx/Rx
    2826 		 * and the other for link status changes.
   2827 		 * In this situation, wm_nq_transmit() is disadvantageous
   2828 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2829 		 */
   2830 		if (wm_is_using_multiqueue(sc))
   2831 			ifp->if_transmit = wm_nq_transmit;
   2832 	} else {
   2833 		ifp->if_start = wm_start;
   2834 		/*
    2835 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2836 		 */
   2837 		if (wm_is_using_multiqueue(sc))
   2838 			ifp->if_transmit = wm_transmit;
   2839 	}
    2840 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2841 	ifp->if_init = wm_init;
   2842 	ifp->if_stop = wm_stop;
   2843 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2844 	IFQ_SET_READY(&ifp->if_snd);
   2845 
   2846 	/* Check for jumbo frame */
   2847 	switch (sc->sc_type) {
   2848 	case WM_T_82573:
   2849 		/* XXX limited to 9234 if ASPM is disabled */
   2850 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2851 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2852 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2853 		break;
   2854 	case WM_T_82571:
   2855 	case WM_T_82572:
   2856 	case WM_T_82574:
   2857 	case WM_T_82583:
   2858 	case WM_T_82575:
   2859 	case WM_T_82576:
   2860 	case WM_T_82580:
   2861 	case WM_T_I350:
   2862 	case WM_T_I354:
   2863 	case WM_T_I210:
   2864 	case WM_T_I211:
   2865 	case WM_T_80003:
   2866 	case WM_T_ICH9:
   2867 	case WM_T_ICH10:
   2868 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2869 	case WM_T_PCH_LPT:
   2870 	case WM_T_PCH_SPT:
   2871 	case WM_T_PCH_CNP:
   2872 		/* XXX limited to 9234 */
   2873 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2874 		break;
   2875 	case WM_T_PCH:
   2876 		/* XXX limited to 4096 */
   2877 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2878 		break;
   2879 	case WM_T_82542_2_0:
   2880 	case WM_T_82542_2_1:
   2881 	case WM_T_ICH8:
   2882 		/* No support for jumbo frame */
   2883 		break;
   2884 	default:
   2885 		/* ETHER_MAX_LEN_JUMBO */
   2886 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2887 		break;
   2888 	}
   2889 
    2890 	/* If we're an i82543 or greater, we can support VLANs. */
   2891 	if (sc->sc_type >= WM_T_82543) {
   2892 		sc->sc_ethercom.ec_capabilities |=
   2893 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2894 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2895 	}
   2896 
   2897 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2898 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2899 
   2900 	/*
    2901 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2902 	 * on i82543 and later.
   2903 	 */
   2904 	if (sc->sc_type >= WM_T_82543) {
   2905 		ifp->if_capabilities |=
   2906 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2907 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2908 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2909 		    IFCAP_CSUM_TCPv6_Tx |
   2910 		    IFCAP_CSUM_UDPv6_Tx;
   2911 	}
   2912 
   2913 	/*
   2914 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2915 	 *
   2916 	 *	82541GI (8086:1076) ... no
   2917 	 *	82572EI (8086:10b9) ... yes
   2918 	 */
   2919 	if (sc->sc_type >= WM_T_82571) {
   2920 		ifp->if_capabilities |=
   2921 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2922 	}
   2923 
   2924 	/*
    2925 	 * If we're an i82544 or greater (except i82547), we can do
   2926 	 * TCP segmentation offload.
   2927 	 */
   2928 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2929 		ifp->if_capabilities |= IFCAP_TSOv4;
   2930 	}
   2931 
   2932 	if (sc->sc_type >= WM_T_82571) {
   2933 		ifp->if_capabilities |= IFCAP_TSOv6;
   2934 	}
   2935 
   2936 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2937 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2938 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2939 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2940 
   2941 #ifdef WM_MPSAFE
   2942 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2943 #else
   2944 	sc->sc_core_lock = NULL;
   2945 #endif
   2946 
   2947 	/* Attach the interface. */
   2948 	error = if_initialize(ifp);
   2949 	if (error != 0) {
   2950 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2951 		    error);
   2952 		return; /* Error */
   2953 	}
   2954 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2955 	ether_ifattach(ifp, enaddr);
   2956 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2957 	if_register(ifp);
   2958 
   2959 #ifdef WM_EVENT_COUNTERS
   2960 	/* Attach event counters. */
   2961 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2962 	    NULL, xname, "linkintr");
   2963 
   2964 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2965 	    NULL, xname, "tx_xoff");
   2966 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2967 	    NULL, xname, "tx_xon");
   2968 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2969 	    NULL, xname, "rx_xoff");
   2970 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2971 	    NULL, xname, "rx_xon");
   2972 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2973 	    NULL, xname, "rx_macctl");
   2974 #endif /* WM_EVENT_COUNTERS */
   2975 
   2976 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2977 		pmf_class_network_register(self, ifp);
   2978 	else
   2979 		aprint_error_dev(self, "couldn't establish power handler\n");
   2980 
   2981 	sc->sc_flags |= WM_F_ATTACHED;
   2982 out:
   2983 	return;
   2984 }
   2985 
   2986 /* The detach function (ca_detach) */
   2987 static int
   2988 wm_detach(device_t self, int flags __unused)
   2989 {
   2990 	struct wm_softc *sc = device_private(self);
   2991 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2992 	int i;
   2993 
   2994 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2995 		return 0;
   2996 
   2997 	/* Stop the interface. Callouts are stopped in it. */
   2998 	wm_stop(ifp, 1);
   2999 
   3000 	pmf_device_deregister(self);
   3001 
   3002 #ifdef WM_EVENT_COUNTERS
   3003 	evcnt_detach(&sc->sc_ev_linkintr);
   3004 
   3005 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3006 	evcnt_detach(&sc->sc_ev_tx_xon);
   3007 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3008 	evcnt_detach(&sc->sc_ev_rx_xon);
   3009 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3010 #endif /* WM_EVENT_COUNTERS */
   3011 
   3012 	/* Tell the firmware about the release */
   3013 	WM_CORE_LOCK(sc);
   3014 	wm_release_manageability(sc);
   3015 	wm_release_hw_control(sc);
   3016 	wm_enable_wakeup(sc);
   3017 	WM_CORE_UNLOCK(sc);
   3018 
   3019 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3020 
   3021 	/* Delete all remaining media. */
   3022 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3023 
   3024 	ether_ifdetach(ifp);
   3025 	if_detach(ifp);
   3026 	if_percpuq_destroy(sc->sc_ipq);
   3027 
   3028 	/* Unload RX dmamaps and free mbufs */
   3029 	for (i = 0; i < sc->sc_nqueues; i++) {
   3030 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3031 		mutex_enter(rxq->rxq_lock);
   3032 		wm_rxdrain(rxq);
   3033 		mutex_exit(rxq->rxq_lock);
   3034 	}
   3035 	/* Must unlock here */
   3036 
   3037 	/* Disestablish the interrupt handler */
   3038 	for (i = 0; i < sc->sc_nintrs; i++) {
   3039 		if (sc->sc_ihs[i] != NULL) {
   3040 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3041 			sc->sc_ihs[i] = NULL;
   3042 		}
   3043 	}
   3044 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3045 
   3046 	wm_free_txrx_queues(sc);
   3047 
   3048 	/* Unmap the registers */
   3049 	if (sc->sc_ss) {
   3050 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3051 		sc->sc_ss = 0;
   3052 	}
   3053 	if (sc->sc_ios) {
   3054 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3055 		sc->sc_ios = 0;
   3056 	}
   3057 	if (sc->sc_flashs) {
   3058 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3059 		sc->sc_flashs = 0;
   3060 	}
   3061 
   3062 	if (sc->sc_core_lock)
   3063 		mutex_obj_free(sc->sc_core_lock);
   3064 	if (sc->sc_ich_phymtx)
   3065 		mutex_obj_free(sc->sc_ich_phymtx);
   3066 	if (sc->sc_ich_nvmmtx)
   3067 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3068 
   3069 	return 0;
   3070 }
   3071 
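/*
 * wm_suspend:		[pmf device suspend handler]
 *
 *	Release manageability and hardware control, and enable wakeup,
 *	before the device is powered down.
 */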
   3072 static bool
   3073 wm_suspend(device_t self, const pmf_qual_t *qual)
   3074 {
   3075 	struct wm_softc *sc = device_private(self);
   3076 
   3077 	wm_release_manageability(sc);
   3078 	wm_release_hw_control(sc);
   3079 	wm_enable_wakeup(sc);
   3080 
   3081 	return true;
   3082 }
   3083 
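/*
 * wm_resume:		[pmf device resume handler]
 *
 *	Report and clear the wakeup status, apply the PCH resume
 *	workarounds and, if the interface is down, reset the chip.
 *	If the interface is up, if_init() is called automatically
 *	via pmf_class_network_register().
 */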
   3084 static bool
   3085 wm_resume(device_t self, const pmf_qual_t *qual)
   3086 {
   3087 	struct wm_softc *sc = device_private(self);
   3088 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3089 	pcireg_t reg;
   3090 	char buf[256];
   3091 
   3092 	reg = CSR_READ(sc, WMREG_WUS);
   3093 	if (reg != 0) {
   3094 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3095 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3096 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3097 	}
   3098 
   3099 	if (sc->sc_type >= WM_T_PCH2)
   3100 		wm_resume_workarounds_pchlan(sc);
   3101 	if ((ifp->if_flags & IFF_UP) == 0) {
   3102 		wm_reset(sc);
   3103 		/* Non-AMT based hardware can now take control from firmware */
   3104 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3105 			wm_get_hw_control(sc);
   3106 		wm_init_manageability(sc);
   3107 	} else {
   3108 		/*
   3109 		 * We called pmf_class_network_register(), so if_init() is
   3110 		 * automatically called when IFF_UP. wm_reset(),
   3111 		 * wm_get_hw_control() and wm_init_manageability() are called
   3112 		 * via wm_init().
   3113 		 */
   3114 	}
   3115 
   3116 	return true;
   3117 }
   3118 
   3119 /*
   3120  * wm_watchdog:		[ifnet interface function]
   3121  *
   3122  *	Watchdog timer handler.
   3123  */
   3124 static void
   3125 wm_watchdog(struct ifnet *ifp)
   3126 {
   3127 	int qid;
   3128 	struct wm_softc *sc = ifp->if_softc;
    3129 	uint16_t hang_queue = 0; /* Max number of queues in wm(4) is 82576's 16. */
   3130 
   3131 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3132 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3133 
   3134 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3135 	}
   3136 
    3137 	/* If any of the queues hung up, reset the interface. */
   3138 	if (hang_queue != 0) {
   3139 		(void)wm_init(ifp);
   3140 
   3141 		/*
    3142 		 * There is still some upper layer processing which calls
    3143 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3144 		 */
   3145 		/* Try to get more packets going. */
   3146 		ifp->if_start(ifp);
   3147 	}
   3148 }
   3149 
   3150 
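/*
 * wm_watchdog_txq:
 *
 *	Per-queue watchdog. Calls wm_watchdog_txq_locked() when the
 *	queue has been sending for longer than wm_watchdog_timeout.
 */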
   3151 static void
   3152 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3153 {
   3154 
   3155 	mutex_enter(txq->txq_lock);
   3156 	if (txq->txq_sending &&
   3157 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3158 		wm_watchdog_txq_locked(ifp, txq, hang);
   3159 
   3160 	mutex_exit(txq->txq_lock);
   3161 }
   3162 
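/*
 * wm_watchdog_txq_locked:
 *
 *	Sweep up completed transmit jobs; if the queue is still sending,
 *	mark it as hung and log diagnostics. Called with the Tx queue
 *	lock held.
 */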
   3163 static void
   3164 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3165     uint16_t *hang)
   3166 {
   3167 	struct wm_softc *sc = ifp->if_softc;
   3168 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3169 
   3170 	KASSERT(mutex_owned(txq->txq_lock));
   3171 
   3172 	/*
   3173 	 * Since we're using delayed interrupts, sweep up
   3174 	 * before we report an error.
   3175 	 */
   3176 	wm_txeof(txq, UINT_MAX);
   3177 
   3178 	if (txq->txq_sending)
   3179 		*hang |= __BIT(wmq->wmq_id);
   3180 
   3181 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3182 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3183 		    device_xname(sc->sc_dev));
   3184 	} else {
   3185 #ifdef WM_DEBUG
   3186 		int i, j;
   3187 		struct wm_txsoft *txs;
   3188 #endif
   3189 		log(LOG_ERR,
   3190 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3191 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3192 		    txq->txq_next);
   3193 		ifp->if_oerrors++;
   3194 #ifdef WM_DEBUG
   3195 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3196 		    i = WM_NEXTTXS(txq, i)) {
   3197 			txs = &txq->txq_soft[i];
   3198 			printf("txs %d tx %d -> %d\n",
   3199 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3200 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3201 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3202 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3203 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3204 					printf("\t %#08x%08x\n",
   3205 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3206 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3207 				} else {
   3208 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3209 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3210 					    txq->txq_descs[j].wtx_addr.wa_low);
   3211 					printf("\t %#04x%02x%02x%08x\n",
   3212 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3213 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3214 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3215 					    txq->txq_descs[j].wtx_cmdlen);
   3216 				}
   3217 				if (j == txs->txs_lastdesc)
   3218 					break;
   3219 			}
   3220 		}
   3221 #endif
   3222 	}
   3223 }
   3224 
   3225 /*
   3226  * wm_tick:
   3227  *
   3228  *	One second timer, used to check link status, sweep up
   3229  *	completed transmit jobs, etc.
   3230  */
   3231 static void
   3232 wm_tick(void *arg)
   3233 {
   3234 	struct wm_softc *sc = arg;
   3235 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3236 #ifndef WM_MPSAFE
   3237 	int s = splnet();
   3238 #endif
   3239 
   3240 	WM_CORE_LOCK(sc);
   3241 
   3242 	if (sc->sc_core_stopping) {
   3243 		WM_CORE_UNLOCK(sc);
   3244 #ifndef WM_MPSAFE
   3245 		splx(s);
   3246 #endif
   3247 		return;
   3248 	}
   3249 
   3250 	if (sc->sc_type >= WM_T_82542_2_1) {
   3251 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3252 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3253 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3254 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3255 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3256 	}
   3257 
   3258 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3259 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3260 	    + CSR_READ(sc, WMREG_CRCERRS)
   3261 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3262 	    + CSR_READ(sc, WMREG_SYMERRC)
   3263 	    + CSR_READ(sc, WMREG_RXERRC)
   3264 	    + CSR_READ(sc, WMREG_SEC)
   3265 	    + CSR_READ(sc, WMREG_CEXTERR)
   3266 	    + CSR_READ(sc, WMREG_RLEC);
   3267 	/*
    3268 	 * WMREG_RNBC is incremented when there are no available buffers in host
    3269 	 * memory. It does not count dropped packets, because the ethernet
    3270 	 * controller can still receive packets in such a case if there is
    3271 	 * space in the PHY's FIFO.
    3272 	 *
    3273 	 * If you want to know the number of WMREG_RNBC events, use your
    3274 	 * own EVCNT instead of if_iqdrops.
   3275 	 */
   3276 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3277 
   3278 	if (sc->sc_flags & WM_F_HAS_MII)
   3279 		mii_tick(&sc->sc_mii);
   3280 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3281 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3282 		wm_serdes_tick(sc);
   3283 	else
   3284 		wm_tbi_tick(sc);
   3285 
   3286 	WM_CORE_UNLOCK(sc);
   3287 
   3288 	wm_watchdog(ifp);
   3289 
   3290 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3291 }
   3292 
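/*
 * wm_ifflags_cb:
 *
 *	Callback for changes of if_flags and ec_capenable. Returns
 *	ENETRESET when the change requires a full reinitialization.
 */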
   3293 static int
   3294 wm_ifflags_cb(struct ethercom *ec)
   3295 {
   3296 	struct ifnet *ifp = &ec->ec_if;
   3297 	struct wm_softc *sc = ifp->if_softc;
   3298 	int iffchange, ecchange;
   3299 	bool needreset = false;
   3300 	int rc = 0;
   3301 
   3302 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3303 		device_xname(sc->sc_dev), __func__));
   3304 
   3305 	WM_CORE_LOCK(sc);
   3306 
   3307 	/*
   3308 	 * Check for if_flags.
   3309 	 * Main usage is to prevent linkdown when opening bpf.
   3310 	 */
   3311 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3312 	sc->sc_if_flags = ifp->if_flags;
   3313 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3314 		needreset = true;
   3315 		goto ec;
   3316 	}
   3317 
   3318 	/* iff related updates */
   3319 	if ((iffchange & IFF_PROMISC) != 0)
   3320 		wm_set_filter(sc);
   3321 
   3322 	wm_set_vlan(sc);
   3323 
   3324 ec:
   3325 	/* Check for ec_capenable. */
   3326 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3327 	sc->sc_ec_capenable = ec->ec_capenable;
   3328 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3329 		needreset = true;
   3330 		goto out;
   3331 	}
   3332 
   3333 	/* ec related updates */
   3334 	wm_set_eee(sc);
   3335 
   3336 out:
   3337 	if (needreset)
   3338 		rc = ENETRESET;
   3339 	WM_CORE_UNLOCK(sc);
   3340 
   3341 	return rc;
   3342 }
   3343 
   3344 /*
   3345  * wm_ioctl:		[ifnet interface function]
   3346  *
   3347  *	Handle control requests from the operator.
   3348  */
   3349 static int
   3350 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3351 {
   3352 	struct wm_softc *sc = ifp->if_softc;
   3353 	struct ifreq *ifr = (struct ifreq *)data;
   3354 	struct ifaddr *ifa = (struct ifaddr *)data;
   3355 	struct sockaddr_dl *sdl;
   3356 	int s, error;
   3357 
   3358 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3359 		device_xname(sc->sc_dev), __func__));
   3360 
   3361 #ifndef WM_MPSAFE
   3362 	s = splnet();
   3363 #endif
   3364 	switch (cmd) {
   3365 	case SIOCSIFMEDIA:
   3366 		WM_CORE_LOCK(sc);
   3367 		/* Flow control requires full-duplex mode. */
   3368 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3369 		    (ifr->ifr_media & IFM_FDX) == 0)
   3370 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3371 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3372 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3373 				/* We can do both TXPAUSE and RXPAUSE. */
   3374 				ifr->ifr_media |=
   3375 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3376 			}
   3377 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3378 		}
   3379 		WM_CORE_UNLOCK(sc);
   3380 #ifdef WM_MPSAFE
   3381 		s = splnet();
   3382 #endif
   3383 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3384 #ifdef WM_MPSAFE
   3385 		splx(s);
   3386 #endif
   3387 		break;
   3388 	case SIOCINITIFADDR:
   3389 		WM_CORE_LOCK(sc);
   3390 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3391 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3392 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3393 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3394 			/* Unicast address is the first multicast entry */
   3395 			wm_set_filter(sc);
   3396 			error = 0;
   3397 			WM_CORE_UNLOCK(sc);
   3398 			break;
   3399 		}
   3400 		WM_CORE_UNLOCK(sc);
   3401 		/*FALLTHROUGH*/
   3402 	default:
   3403 #ifdef WM_MPSAFE
   3404 		s = splnet();
   3405 #endif
   3406 		/* It may call wm_start, so unlock here */
   3407 		error = ether_ioctl(ifp, cmd, data);
   3408 #ifdef WM_MPSAFE
   3409 		splx(s);
   3410 #endif
   3411 		if (error != ENETRESET)
   3412 			break;
   3413 
   3414 		error = 0;
   3415 
   3416 		if (cmd == SIOCSIFCAP)
   3417 			error = (*ifp->if_init)(ifp);
   3418 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3419 			;
   3420 		else if (ifp->if_flags & IFF_RUNNING) {
   3421 			/*
   3422 			 * Multicast list has changed; set the hardware filter
   3423 			 * accordingly.
   3424 			 */
   3425 			WM_CORE_LOCK(sc);
   3426 			wm_set_filter(sc);
   3427 			WM_CORE_UNLOCK(sc);
   3428 		}
   3429 		break;
   3430 	}
   3431 
   3432 #ifndef WM_MPSAFE
   3433 	splx(s);
   3434 #endif
   3435 	return error;
   3436 }
   3437 
   3438 /* MAC address related */
   3439 
   3440 /*
    3441  * Get the offset of the MAC address and return it.
    3442  * If an error occurs, return offset 0.
   3443  */
   3444 static uint16_t
   3445 wm_check_alt_mac_addr(struct wm_softc *sc)
   3446 {
   3447 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3448 	uint16_t offset = NVM_OFF_MACADDR;
   3449 
   3450 	/* Try to read alternative MAC address pointer */
   3451 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3452 		return 0;
   3453 
    3454 	/* Check whether the pointer is valid. */
   3455 	if ((offset == 0x0000) || (offset == 0xffff))
   3456 		return 0;
   3457 
   3458 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3459 	/*
    3460 	 * Check whether the alternative MAC address is valid.
    3461 	 * Some cards have a non-0xffff pointer but don't actually
    3462 	 * use an alternative MAC address.
   3463 	 *
   3464 	 * Check whether the broadcast bit is set or not.
   3465 	 */
   3466 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3467 		if (((myea[0] & 0xff) & 0x01) == 0)
   3468 			return offset; /* Found */
   3469 
   3470 	/* Not found */
   3471 	return 0;
   3472 }
   3473 
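/*
 * wm_read_mac_addr:
 *
 *	Read the MAC address from the NVM, using the per-function offset
 *	where applicable and toggling the LSB on the second port of some
 *	dual port cards.
 */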
   3474 static int
   3475 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3476 {
   3477 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3478 	uint16_t offset = NVM_OFF_MACADDR;
   3479 	int do_invert = 0;
   3480 
   3481 	switch (sc->sc_type) {
   3482 	case WM_T_82580:
   3483 	case WM_T_I350:
   3484 	case WM_T_I354:
   3485 		/* EEPROM Top Level Partitioning */
   3486 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3487 		break;
   3488 	case WM_T_82571:
   3489 	case WM_T_82575:
   3490 	case WM_T_82576:
   3491 	case WM_T_80003:
   3492 	case WM_T_I210:
   3493 	case WM_T_I211:
   3494 		offset = wm_check_alt_mac_addr(sc);
   3495 		if (offset == 0)
   3496 			if ((sc->sc_funcid & 0x01) == 1)
   3497 				do_invert = 1;
   3498 		break;
   3499 	default:
   3500 		if ((sc->sc_funcid & 0x01) == 1)
   3501 			do_invert = 1;
   3502 		break;
   3503 	}
   3504 
   3505 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3506 		goto bad;
   3507 
   3508 	enaddr[0] = myea[0] & 0xff;
   3509 	enaddr[1] = myea[0] >> 8;
   3510 	enaddr[2] = myea[1] & 0xff;
   3511 	enaddr[3] = myea[1] >> 8;
   3512 	enaddr[4] = myea[2] & 0xff;
   3513 	enaddr[5] = myea[2] >> 8;
   3514 
   3515 	/*
   3516 	 * Toggle the LSB of the MAC address on the second port
   3517 	 * of some dual port cards.
   3518 	 */
   3519 	if (do_invert != 0)
   3520 		enaddr[5] ^= 1;
   3521 
   3522 	return 0;
   3523 
   3524  bad:
   3525 	return -1;
   3526 }
   3527 
   3528 /*
   3529  * wm_set_ral:
   3530  *
    3531  *	Set an entry in the receive address list.
   3532  */
   3533 static void
   3534 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3535 {
   3536 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3537 	uint32_t wlock_mac;
   3538 	int rv;
   3539 
   3540 	if (enaddr != NULL) {
   3541 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3542 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3543 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3544 		ral_hi |= RAL_AV;
   3545 	} else {
   3546 		ral_lo = 0;
   3547 		ral_hi = 0;
   3548 	}
   3549 
   3550 	switch (sc->sc_type) {
   3551 	case WM_T_82542_2_0:
   3552 	case WM_T_82542_2_1:
   3553 	case WM_T_82543:
   3554 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3555 		CSR_WRITE_FLUSH(sc);
   3556 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3557 		CSR_WRITE_FLUSH(sc);
   3558 		break;
   3559 	case WM_T_PCH2:
   3560 	case WM_T_PCH_LPT:
   3561 	case WM_T_PCH_SPT:
   3562 	case WM_T_PCH_CNP:
   3563 		if (idx == 0) {
   3564 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3565 			CSR_WRITE_FLUSH(sc);
   3566 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3567 			CSR_WRITE_FLUSH(sc);
   3568 			return;
   3569 		}
   3570 		if (sc->sc_type != WM_T_PCH2) {
   3571 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3572 			    FWSM_WLOCK_MAC);
   3573 			addrl = WMREG_SHRAL(idx - 1);
   3574 			addrh = WMREG_SHRAH(idx - 1);
   3575 		} else {
   3576 			wlock_mac = 0;
   3577 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3578 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3579 		}
   3580 
   3581 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3582 			rv = wm_get_swflag_ich8lan(sc);
   3583 			if (rv != 0)
   3584 				return;
   3585 			CSR_WRITE(sc, addrl, ral_lo);
   3586 			CSR_WRITE_FLUSH(sc);
   3587 			CSR_WRITE(sc, addrh, ral_hi);
   3588 			CSR_WRITE_FLUSH(sc);
   3589 			wm_put_swflag_ich8lan(sc);
   3590 		}
   3591 
   3592 		break;
   3593 	default:
   3594 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3595 		CSR_WRITE_FLUSH(sc);
   3596 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3597 		CSR_WRITE_FLUSH(sc);
   3598 		break;
   3599 	}
   3600 }
   3601 
   3602 /*
   3603  * wm_mchash:
   3604  *
   3605  *	Compute the hash of the multicast address for the 4096-bit
   3606  *	multicast filter.
   3607  */
   3608 static uint32_t
   3609 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3610 {
   3611 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3612 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3613 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3614 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3615 	uint32_t hash;
   3616 
   3617 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3618 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3619 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
    3620 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
   3621 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3622 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3623 		return (hash & 0x3ff);
   3624 	}
   3625 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3626 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3627 
   3628 	return (hash & 0xfff);
   3629 }
   3630 
    3631 /*
    3632  * wm_rar_count:
    3633  *	Return the number of entries in the receive address list.
    3634  */
   3635 static int
   3636 wm_rar_count(struct wm_softc *sc)
   3637 {
   3638 	int size;
   3639 
   3640 	switch (sc->sc_type) {
   3641 	case WM_T_ICH8:
    3642 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3643 		break;
   3644 	case WM_T_ICH9:
   3645 	case WM_T_ICH10:
   3646 	case WM_T_PCH:
   3647 		size = WM_RAL_TABSIZE_ICH8;
   3648 		break;
   3649 	case WM_T_PCH2:
   3650 		size = WM_RAL_TABSIZE_PCH2;
   3651 		break;
   3652 	case WM_T_PCH_LPT:
   3653 	case WM_T_PCH_SPT:
   3654 	case WM_T_PCH_CNP:
   3655 		size = WM_RAL_TABSIZE_PCH_LPT;
   3656 		break;
   3657 	case WM_T_82575:
   3658 	case WM_T_I210:
   3659 	case WM_T_I211:
   3660 		size = WM_RAL_TABSIZE_82575;
   3661 		break;
   3662 	case WM_T_82576:
   3663 	case WM_T_82580:
   3664 		size = WM_RAL_TABSIZE_82576;
   3665 		break;
   3666 	case WM_T_I350:
   3667 	case WM_T_I354:
   3668 		size = WM_RAL_TABSIZE_I350;
   3669 		break;
   3670 	default:
   3671 		size = WM_RAL_TABSIZE;
   3672 	}
   3673 
   3674 	return size;
   3675 }
   3676 
   3677 /*
   3678  * wm_set_filter:
   3679  *
   3680  *	Set up the receive filter.
   3681  */
   3682 static void
   3683 wm_set_filter(struct wm_softc *sc)
   3684 {
   3685 	struct ethercom *ec = &sc->sc_ethercom;
   3686 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3687 	struct ether_multi *enm;
   3688 	struct ether_multistep step;
   3689 	bus_addr_t mta_reg;
   3690 	uint32_t hash, reg, bit;
   3691 	int i, size, ralmax;
   3692 
   3693 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3694 		device_xname(sc->sc_dev), __func__));
   3695 
   3696 	if (sc->sc_type >= WM_T_82544)
   3697 		mta_reg = WMREG_CORDOVA_MTA;
   3698 	else
   3699 		mta_reg = WMREG_MTA;
   3700 
   3701 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3702 
   3703 	if (ifp->if_flags & IFF_BROADCAST)
   3704 		sc->sc_rctl |= RCTL_BAM;
   3705 	if (ifp->if_flags & IFF_PROMISC) {
   3706 		sc->sc_rctl |= RCTL_UPE;
   3707 		ETHER_LOCK(ec);
   3708 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3709 		ETHER_UNLOCK(ec);
   3710 		goto allmulti;
   3711 	}
   3712 
   3713 	/*
   3714 	 * Set the station address in the first RAL slot, and
   3715 	 * clear the remaining slots.
   3716 	 */
   3717 	size = wm_rar_count(sc);
   3718 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3719 
   3720 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3721 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3722 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3723 		switch (i) {
   3724 		case 0:
   3725 			/* We can use all entries */
   3726 			ralmax = size;
   3727 			break;
   3728 		case 1:
   3729 			/* Only RAR[0] */
   3730 			ralmax = 1;
   3731 			break;
   3732 		default:
   3733 			/* Available SHRA + RAR[0] */
   3734 			ralmax = i + 1;
   3735 		}
   3736 	} else
   3737 		ralmax = size;
   3738 	for (i = 1; i < size; i++) {
   3739 		if (i < ralmax)
   3740 			wm_set_ral(sc, NULL, i);
   3741 	}
   3742 
   3743 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3744 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3745 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3746 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3747 		size = WM_ICH8_MC_TABSIZE;
   3748 	else
   3749 		size = WM_MC_TABSIZE;
   3750 	/* Clear out the multicast table. */
   3751 	for (i = 0; i < size; i++) {
   3752 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3753 		CSR_WRITE_FLUSH(sc);
   3754 	}
   3755 
   3756 	ETHER_LOCK(ec);
   3757 	ETHER_FIRST_MULTI(step, ec, enm);
   3758 	while (enm != NULL) {
   3759 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3760 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3761 			ETHER_UNLOCK(ec);
   3762 			/*
   3763 			 * We must listen to a range of multicast addresses.
   3764 			 * For now, just accept all multicasts, rather than
   3765 			 * trying to set only those filter bits needed to match
   3766 			 * the range.  (At this time, the only use of address
   3767 			 * ranges is for IP multicast routing, for which the
   3768 			 * range is big enough to require all bits set.)
   3769 			 */
   3770 			goto allmulti;
   3771 		}
   3772 
   3773 		hash = wm_mchash(sc, enm->enm_addrlo);
   3774 
   3775 		reg = (hash >> 5);
   3776 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3777 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3778 		    || (sc->sc_type == WM_T_PCH2)
   3779 		    || (sc->sc_type == WM_T_PCH_LPT)
   3780 		    || (sc->sc_type == WM_T_PCH_SPT)
   3781 		    || (sc->sc_type == WM_T_PCH_CNP))
   3782 			reg &= 0x1f;
   3783 		else
   3784 			reg &= 0x7f;
   3785 		bit = hash & 0x1f;
   3786 
   3787 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3788 		hash |= 1U << bit;
   3789 
   3790 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3791 			/*
    3792 			 * 82544 Errata 9: Certain registers cannot be written
   3793 			 * with particular alignments in PCI-X bus operation
   3794 			 * (FCAH, MTA and VFTA).
   3795 			 */
   3796 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3797 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3798 			CSR_WRITE_FLUSH(sc);
   3799 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3800 			CSR_WRITE_FLUSH(sc);
   3801 		} else {
   3802 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3803 			CSR_WRITE_FLUSH(sc);
   3804 		}
   3805 
   3806 		ETHER_NEXT_MULTI(step, enm);
   3807 	}
   3808 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3809 	ETHER_UNLOCK(ec);
   3810 
   3811 	goto setit;
   3812 
   3813  allmulti:
   3814 	sc->sc_rctl |= RCTL_MPE;
   3815 
   3816  setit:
   3817 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3818 }
   3819 
   3820 /* Reset and init related */
   3821 
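/*
 * wm_set_vlan:
 *
 *	Set or clear CTRL_VME depending on whether any VLANs are
 *	attached.
 */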
   3822 static void
   3823 wm_set_vlan(struct wm_softc *sc)
   3824 {
   3825 
   3826 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3827 		device_xname(sc->sc_dev), __func__));
   3828 
   3829 	/* Deal with VLAN enables. */
   3830 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3831 		sc->sc_ctrl |= CTRL_VME;
   3832 	else
   3833 		sc->sc_ctrl &= ~CTRL_VME;
   3834 
   3835 	/* Write the control registers. */
   3836 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3837 }
   3838 
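/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is defaulted to 0, set it to 10ms
 *	(or to 16ms via the PCIe capability on GCR_CAP_VER2 devices) and
 *	disable completion timeout resend.
 */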
   3839 static void
   3840 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3841 {
   3842 	uint32_t gcr;
   3843 	pcireg_t ctrl2;
   3844 
   3845 	gcr = CSR_READ(sc, WMREG_GCR);
   3846 
   3847 	/* Only take action if timeout value is defaulted to 0 */
   3848 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3849 		goto out;
   3850 
   3851 	if ((gcr & GCR_CAP_VER2) == 0) {
   3852 		gcr |= GCR_CMPL_TMOUT_10MS;
   3853 		goto out;
   3854 	}
   3855 
   3856 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3857 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3858 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3859 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3860 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3861 
   3862 out:
   3863 	/* Disable completion timeout resend */
   3864 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3865 
   3866 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3867 }
   3868 
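/*
 * wm_get_auto_rd_done:
 *
 *	Wait up to 10ms for the EEPROM auto read to complete on chips
 *	which report it through EECD_EE_AUTORD.
 */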
   3869 void
   3870 wm_get_auto_rd_done(struct wm_softc *sc)
   3871 {
   3872 	int i;
   3873 
   3874 	/* wait for eeprom to reload */
   3875 	switch (sc->sc_type) {
   3876 	case WM_T_82571:
   3877 	case WM_T_82572:
   3878 	case WM_T_82573:
   3879 	case WM_T_82574:
   3880 	case WM_T_82583:
   3881 	case WM_T_82575:
   3882 	case WM_T_82576:
   3883 	case WM_T_82580:
   3884 	case WM_T_I350:
   3885 	case WM_T_I354:
   3886 	case WM_T_I210:
   3887 	case WM_T_I211:
   3888 	case WM_T_80003:
   3889 	case WM_T_ICH8:
   3890 	case WM_T_ICH9:
   3891 		for (i = 0; i < 10; i++) {
   3892 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3893 				break;
   3894 			delay(1000);
   3895 		}
   3896 		if (i == 10) {
   3897 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3898 			    "complete\n", device_xname(sc->sc_dev));
   3899 		}
   3900 		break;
   3901 	default:
   3902 		break;
   3903 	}
   3904 }
   3905 
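/*
 * wm_lan_init_done:
 *
 *	Wait for the LAN init done bit in the STATUS register (ICH10 and
 *	the PCH variants), then clear it.
 */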
   3906 void
   3907 wm_lan_init_done(struct wm_softc *sc)
   3908 {
   3909 	uint32_t reg = 0;
   3910 	int i;
   3911 
   3912 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3913 		device_xname(sc->sc_dev), __func__));
   3914 
   3915 	/* Wait for eeprom to reload */
   3916 	switch (sc->sc_type) {
   3917 	case WM_T_ICH10:
   3918 	case WM_T_PCH:
   3919 	case WM_T_PCH2:
   3920 	case WM_T_PCH_LPT:
   3921 	case WM_T_PCH_SPT:
   3922 	case WM_T_PCH_CNP:
   3923 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3924 			reg = CSR_READ(sc, WMREG_STATUS);
   3925 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3926 				break;
   3927 			delay(100);
   3928 		}
   3929 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3930 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3931 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3932 		}
   3933 		break;
   3934 	default:
   3935 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3936 		    __func__);
   3937 		break;
   3938 	}
   3939 
   3940 	reg &= ~STATUS_LAN_INIT_DONE;
   3941 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3942 }
   3943 
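/*
 * wm_get_cfg_done:
 *
 *	Wait until the hardware has finished loading its configuration
 *	after a reset. The mechanism differs per chip generation.
 */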
   3944 void
   3945 wm_get_cfg_done(struct wm_softc *sc)
   3946 {
   3947 	int mask;
   3948 	uint32_t reg;
   3949 	int i;
   3950 
   3951 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3952 		device_xname(sc->sc_dev), __func__));
   3953 
   3954 	/* Wait for eeprom to reload */
   3955 	switch (sc->sc_type) {
   3956 	case WM_T_82542_2_0:
   3957 	case WM_T_82542_2_1:
   3958 		/* null */
   3959 		break;
   3960 	case WM_T_82543:
   3961 	case WM_T_82544:
   3962 	case WM_T_82540:
   3963 	case WM_T_82545:
   3964 	case WM_T_82545_3:
   3965 	case WM_T_82546:
   3966 	case WM_T_82546_3:
   3967 	case WM_T_82541:
   3968 	case WM_T_82541_2:
   3969 	case WM_T_82547:
   3970 	case WM_T_82547_2:
   3971 	case WM_T_82573:
   3972 	case WM_T_82574:
   3973 	case WM_T_82583:
   3974 		/* generic */
   3975 		delay(10*1000);
   3976 		break;
   3977 	case WM_T_80003:
   3978 	case WM_T_82571:
   3979 	case WM_T_82572:
   3980 	case WM_T_82575:
   3981 	case WM_T_82576:
   3982 	case WM_T_82580:
   3983 	case WM_T_I350:
   3984 	case WM_T_I354:
   3985 	case WM_T_I210:
   3986 	case WM_T_I211:
   3987 		if (sc->sc_type == WM_T_82571) {
   3988 			/* Only 82571 shares port 0 */
   3989 			mask = EEMNGCTL_CFGDONE_0;
   3990 		} else
   3991 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3992 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3993 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3994 				break;
   3995 			delay(1000);
   3996 		}
   3997 		if (i >= WM_PHY_CFG_TIMEOUT)
   3998 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3999 				device_xname(sc->sc_dev), __func__));
   4000 		break;
   4001 	case WM_T_ICH8:
   4002 	case WM_T_ICH9:
   4003 	case WM_T_ICH10:
   4004 	case WM_T_PCH:
   4005 	case WM_T_PCH2:
   4006 	case WM_T_PCH_LPT:
   4007 	case WM_T_PCH_SPT:
   4008 	case WM_T_PCH_CNP:
   4009 		delay(10*1000);
   4010 		if (sc->sc_type >= WM_T_ICH10)
   4011 			wm_lan_init_done(sc);
   4012 		else
   4013 			wm_get_auto_rd_done(sc);
   4014 
   4015 		/* Clear PHY Reset Asserted bit */
   4016 		reg = CSR_READ(sc, WMREG_STATUS);
   4017 		if ((reg & STATUS_PHYRA) != 0)
   4018 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4019 		break;
   4020 	default:
   4021 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4022 		    __func__);
   4023 		break;
   4024 	}
   4025 }
   4026 
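/*
 * wm_phy_post_reset:
 *
 *	Perform the PHY workarounds and LCD configuration required after
 *	a PHY reset. ICH8 and newer only.
 */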
   4027 int
   4028 wm_phy_post_reset(struct wm_softc *sc)
   4029 {
   4030 	device_t dev = sc->sc_dev;
   4031 	uint16_t reg;
   4032 	int rv = 0;
   4033 
   4034 	/* This function is only for ICH8 and newer. */
   4035 	if (sc->sc_type < WM_T_ICH8)
   4036 		return 0;
   4037 
   4038 	if (wm_phy_resetisblocked(sc)) {
   4039 		/* XXX */
   4040 		device_printf(dev, "PHY is blocked\n");
   4041 		return -1;
   4042 	}
   4043 
   4044 	/* Allow time for h/w to get to quiescent state after reset */
   4045 	delay(10*1000);
   4046 
   4047 	/* Perform any necessary post-reset workarounds */
   4048 	if (sc->sc_type == WM_T_PCH)
   4049 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4050 	else if (sc->sc_type == WM_T_PCH2)
   4051 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4052 	if (rv != 0)
   4053 		return rv;
   4054 
   4055 	/* Clear the host wakeup bit after lcd reset */
   4056 	if (sc->sc_type >= WM_T_PCH) {
   4057 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4058 		reg &= ~BM_WUC_HOST_WU_BIT;
   4059 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4060 	}
   4061 
   4062 	/* Configure the LCD with the extended configuration region in NVM */
   4063 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4064 		return rv;
   4065 
   4066 	/* Configure the LCD with the OEM bits in NVM */
   4067 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4068 
   4069 	if (sc->sc_type == WM_T_PCH2) {
   4070 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4071 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4072 			delay(10 * 1000);
   4073 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4074 		}
   4075 		/* Set EEE LPI Update Timer to 200usec */
   4076 		rv = sc->phy.acquire(sc);
   4077 		if (rv)
   4078 			return rv;
   4079 		rv = wm_write_emi_reg_locked(dev,
   4080 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4081 		sc->phy.release(sc);
   4082 	}
   4083 
   4084 	return rv;
   4085 }
   4086 
   4087 /* Only for PCH and newer */
   4088 static int
   4089 wm_write_smbus_addr(struct wm_softc *sc)
   4090 {
   4091 	uint32_t strap, freq;
   4092 	uint16_t phy_data;
   4093 	int rv;
   4094 
   4095 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4096 		device_xname(sc->sc_dev), __func__));
   4097 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4098 
   4099 	strap = CSR_READ(sc, WMREG_STRAP);
   4100 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4101 
   4102 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4103 	if (rv != 0)
   4104 		return -1;
   4105 
   4106 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4107 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4108 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4109 
   4110 	if (sc->sc_phytype == WMPHY_I217) {
   4111 		/* Restore SMBus frequency */
    4112 		if (freq--) {
   4113 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4114 			    | HV_SMB_ADDR_FREQ_HIGH);
   4115 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4116 			    HV_SMB_ADDR_FREQ_LOW);
   4117 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4118 			    HV_SMB_ADDR_FREQ_HIGH);
   4119 		} else
   4120 			DPRINTF(WM_DEBUG_INIT,
   4121 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4122 				device_xname(sc->sc_dev), __func__));
   4123 	}
   4124 
   4125 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4126 	    phy_data);
   4127 }
   4128 
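/*
 * wm_init_lcd_from_nvm:
 *
 *	Configure the LCD (internal PHY) from the extended configuration
 *	region in the NVM when software configuration is enabled.
 */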
   4129 static int
   4130 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4131 {
   4132 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4133 	uint16_t phy_page = 0;
   4134 	int rv = 0;
   4135 
   4136 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4137 		device_xname(sc->sc_dev), __func__));
   4138 
   4139 	switch (sc->sc_type) {
   4140 	case WM_T_ICH8:
   4141 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4142 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4143 			return 0;
   4144 
   4145 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4146 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4147 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4148 			break;
   4149 		}
   4150 		/* FALLTHROUGH */
   4151 	case WM_T_PCH:
   4152 	case WM_T_PCH2:
   4153 	case WM_T_PCH_LPT:
   4154 	case WM_T_PCH_SPT:
   4155 	case WM_T_PCH_CNP:
   4156 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4157 		break;
   4158 	default:
   4159 		return 0;
   4160 	}
   4161 
   4162 	if ((rv = sc->phy.acquire(sc)) != 0)
   4163 		return rv;
   4164 
   4165 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4166 	if ((reg & sw_cfg_mask) == 0)
   4167 		goto release;
   4168 
   4169 	/*
   4170 	 * Make sure HW does not configure LCD from PHY extended configuration
   4171 	 * before SW configuration
   4172 	 */
   4173 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4174 	if ((sc->sc_type < WM_T_PCH2)
   4175 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4176 		goto release;
   4177 
   4178 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4179 		device_xname(sc->sc_dev), __func__));
   4180 	/* word_addr is in DWORD */
   4181 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4182 
   4183 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4184 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4185 	if (cnf_size == 0)
   4186 		goto release;
   4187 
   4188 	if (((sc->sc_type == WM_T_PCH)
   4189 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4190 	    || (sc->sc_type > WM_T_PCH)) {
   4191 		/*
   4192 		 * HW configures the SMBus address and LEDs when the OEM and
   4193 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4194 		 * are cleared, SW will configure them instead.
   4195 		 */
   4196 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4197 			device_xname(sc->sc_dev), __func__));
   4198 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4199 			goto release;
   4200 
   4201 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4202 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4203 		    (uint16_t)reg);
   4204 		if (rv != 0)
   4205 			goto release;
   4206 	}
   4207 
   4208 	/* Configure LCD from extended configuration region. */
   4209 	for (i = 0; i < cnf_size; i++) {
   4210 		uint16_t reg_data, reg_addr;
   4211 
   4212 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4213 			goto release;
   4214 
    4215 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4216 			goto release;
   4217 
   4218 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4219 			phy_page = reg_data;
   4220 
   4221 		reg_addr &= IGPHY_MAXREGADDR;
   4222 		reg_addr |= phy_page;
   4223 
   4224 		KASSERT(sc->phy.writereg_locked != NULL);
   4225 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4226 		    reg_data);
   4227 	}
   4228 
   4229 release:
   4230 	sc->phy.release(sc);
   4231 	return rv;
   4232 }
   4233 
   4234 /*
   4235  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4236  *  @sc:       pointer to the HW structure
   4237  *  @d0_state: boolean if entering d0 or d3 device state
   4238  *
   4239  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4240  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4241  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4242  */
   4243 int
   4244 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4245 {
   4246 	uint32_t mac_reg;
   4247 	uint16_t oem_reg;
   4248 	int rv;
   4249 
   4250 	if (sc->sc_type < WM_T_PCH)
   4251 		return 0;
   4252 
   4253 	rv = sc->phy.acquire(sc);
   4254 	if (rv != 0)
   4255 		return rv;
   4256 
   4257 	if (sc->sc_type == WM_T_PCH) {
   4258 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4259 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4260 			goto release;
   4261 	}
   4262 
   4263 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4264 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4265 		goto release;
   4266 
   4267 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4268 
   4269 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4270 	if (rv != 0)
   4271 		goto release;
   4272 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4273 
   4274 	if (d0_state) {
   4275 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4276 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4277 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4278 			oem_reg |= HV_OEM_BITS_LPLU;
   4279 	} else {
   4280 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4281 		    != 0)
   4282 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4283 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4284 		    != 0)
   4285 			oem_reg |= HV_OEM_BITS_LPLU;
   4286 	}
   4287 
   4288 	/* Set Restart auto-neg to activate the bits */
   4289 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4290 	    && (wm_phy_resetisblocked(sc) == false))
   4291 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4292 
   4293 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4294 
   4295 release:
   4296 	sc->phy.release(sc);
   4297 
   4298 	return rv;
   4299 }
   4300 
   4301 /* Init hardware bits */
   4302 void
   4303 wm_initialize_hardware_bits(struct wm_softc *sc)
   4304 {
   4305 	uint32_t tarc0, tarc1, reg;
   4306 
   4307 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4308 		device_xname(sc->sc_dev), __func__));
   4309 
   4310 	/* For 82571 variant, 80003 and ICHs */
   4311 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4312 	    || (sc->sc_type >= WM_T_80003)) {
   4313 
   4314 		/* Transmit Descriptor Control 0 */
   4315 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4316 		reg |= TXDCTL_COUNT_DESC;
   4317 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4318 
   4319 		/* Transmit Descriptor Control 1 */
   4320 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4321 		reg |= TXDCTL_COUNT_DESC;
   4322 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4323 
   4324 		/* TARC0 */
   4325 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4326 		switch (sc->sc_type) {
   4327 		case WM_T_82571:
   4328 		case WM_T_82572:
   4329 		case WM_T_82573:
   4330 		case WM_T_82574:
   4331 		case WM_T_82583:
   4332 		case WM_T_80003:
   4333 			/* Clear bits 30..27 */
   4334 			tarc0 &= ~__BITS(30, 27);
   4335 			break;
   4336 		default:
   4337 			break;
   4338 		}
   4339 
   4340 		switch (sc->sc_type) {
   4341 		case WM_T_82571:
   4342 		case WM_T_82572:
   4343 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4344 
   4345 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4346 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4347 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4348 			/* 8257[12] Errata No.7 */
    4349 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4350 
   4351 			/* TARC1 bit 28 */
   4352 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4353 				tarc1 &= ~__BIT(28);
   4354 			else
   4355 				tarc1 |= __BIT(28);
   4356 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4357 
   4358 			/*
   4359 			 * 8257[12] Errata No.13
    4360 			 * Disable Dynamic Clock Gating.
   4361 			 */
   4362 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4363 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4364 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4365 			break;
   4366 		case WM_T_82573:
   4367 		case WM_T_82574:
   4368 		case WM_T_82583:
   4369 			if ((sc->sc_type == WM_T_82574)
   4370 			    || (sc->sc_type == WM_T_82583))
   4371 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4372 
   4373 			/* Extended Device Control */
   4374 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4375 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4376 			reg |= __BIT(22);	/* Set bit 22 */
   4377 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4378 
   4379 			/* Device Control */
   4380 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4381 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4382 
   4383 			/* PCIe Control Register */
   4384 			/*
   4385 			 * 82573 Errata (unknown).
   4386 			 *
   4387 			 * 82574 Errata 25 and 82583 Errata 12
   4388 			 * "Dropped Rx Packets":
    4389 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4390 			 */
   4391 			reg = CSR_READ(sc, WMREG_GCR);
   4392 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4393 			CSR_WRITE(sc, WMREG_GCR, reg);
   4394 
   4395 			if ((sc->sc_type == WM_T_82574)
   4396 			    || (sc->sc_type == WM_T_82583)) {
   4397 				/*
   4398 				 * Document says this bit must be set for
   4399 				 * proper operation.
   4400 				 */
   4401 				reg = CSR_READ(sc, WMREG_GCR);
   4402 				reg |= __BIT(22);
   4403 				CSR_WRITE(sc, WMREG_GCR, reg);
   4404 
   4405 				/*
    4406 				 * Apply a workaround for the hardware errata
    4407 				 * documented in the errata docs. It fixes an
    4408 				 * issue where some error-prone or unreliable
    4409 				 * PCIe completions occur, particularly with
    4410 				 * ASPM enabled. Without the fix, the issue
    4411 				 * can cause Tx timeouts.
   4412 				 */
   4413 				reg = CSR_READ(sc, WMREG_GCR2);
   4414 				reg |= __BIT(0);
   4415 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4416 			}
   4417 			break;
   4418 		case WM_T_80003:
   4419 			/* TARC0 */
   4420 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4421 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4422 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4423 
   4424 			/* TARC1 bit 28 */
   4425 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4426 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4427 				tarc1 &= ~__BIT(28);
   4428 			else
   4429 				tarc1 |= __BIT(28);
   4430 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4431 			break;
   4432 		case WM_T_ICH8:
   4433 		case WM_T_ICH9:
   4434 		case WM_T_ICH10:
   4435 		case WM_T_PCH:
   4436 		case WM_T_PCH2:
   4437 		case WM_T_PCH_LPT:
   4438 		case WM_T_PCH_SPT:
   4439 		case WM_T_PCH_CNP:
   4440 			/* TARC0 */
   4441 			if (sc->sc_type == WM_T_ICH8) {
   4442 				/* Set TARC0 bits 29 and 28 */
   4443 				tarc0 |= __BITS(29, 28);
   4444 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4445 				tarc0 |= __BIT(29);
   4446 				/*
   4447 				 *  Drop bit 28. From Linux.
   4448 				 * See I218/I219 spec update
   4449 				 * "5. Buffer Overrun While the I219 is
   4450 				 * Processing DMA Transactions"
   4451 				 */
   4452 				tarc0 &= ~__BIT(28);
   4453 			}
   4454 			/* Set TARC0 bits 23,24,26,27 */
   4455 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4456 
   4457 			/* CTRL_EXT */
   4458 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4459 			reg |= __BIT(22);	/* Set bit 22 */
   4460 			/*
   4461 			 * Enable PHY low-power state when MAC is at D3
   4462 			 * w/o WoL
   4463 			 */
   4464 			if (sc->sc_type >= WM_T_PCH)
   4465 				reg |= CTRL_EXT_PHYPDEN;
   4466 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4467 
   4468 			/* TARC1 */
   4469 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4470 			/* bit 28 */
   4471 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4472 				tarc1 &= ~__BIT(28);
   4473 			else
   4474 				tarc1 |= __BIT(28);
   4475 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4476 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4477 
   4478 			/* Device Status */
   4479 			if (sc->sc_type == WM_T_ICH8) {
   4480 				reg = CSR_READ(sc, WMREG_STATUS);
   4481 				reg &= ~__BIT(31);
   4482 				CSR_WRITE(sc, WMREG_STATUS, reg);
    4484 			}
   4485 
   4486 			/* IOSFPC */
   4487 			if (sc->sc_type == WM_T_PCH_SPT) {
   4488 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4489 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4490 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4491 			}
   4492 			/*
    4493 			 * To work around a descriptor data corruption issue
    4494 			 * seen with NFS v2 UDP traffic, just disable the NFS
    4495 			 * filtering capability.
   4496 			 */
   4497 			reg = CSR_READ(sc, WMREG_RFCTL);
   4498 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4499 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4500 			break;
   4501 		default:
   4502 			break;
   4503 		}
   4504 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4505 
   4506 		switch (sc->sc_type) {
   4507 		/*
   4508 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4509 		 * Avoid RSS Hash Value bug.
   4510 		 */
   4511 		case WM_T_82571:
   4512 		case WM_T_82572:
   4513 		case WM_T_82573:
   4514 		case WM_T_80003:
   4515 		case WM_T_ICH8:
   4516 			reg = CSR_READ(sc, WMREG_RFCTL);
    4517 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4518 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4519 			break;
   4520 		case WM_T_82574:
    4521 			/* Use the extended Rx descriptor. */
   4522 			reg = CSR_READ(sc, WMREG_RFCTL);
   4523 			reg |= WMREG_RFCTL_EXSTEN;
   4524 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4525 			break;
   4526 		default:
   4527 			break;
   4528 		}
   4529 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4530 		/*
   4531 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4532 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4533 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4534 		 * Correctly by the Device"
   4535 		 *
   4536 		 * I354(C2000) Errata AVR53:
   4537 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4538 		 * Hang"
   4539 		 */
   4540 		reg = CSR_READ(sc, WMREG_RFCTL);
   4541 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4542 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4543 	}
   4544 }
   4545 
   4546 static uint32_t
   4547 wm_rxpbs_adjust_82580(uint32_t val)
   4548 {
   4549 	uint32_t rv = 0;
   4550 
   4551 	if (val < __arraycount(wm_82580_rxpbs_table))
   4552 		rv = wm_82580_rxpbs_table[val];
   4553 
   4554 	return rv;
   4555 }
   4556 
   4557 /*
   4558  * wm_reset_phy:
   4559  *
   4560  *	generic PHY reset function.
   4561  *	Same as e1000_phy_hw_reset_generic()
   4562  */
   4563 static int
   4564 wm_reset_phy(struct wm_softc *sc)
   4565 {
   4566 	uint32_t reg;
   4567 
   4568 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4569 		device_xname(sc->sc_dev), __func__));
   4570 	if (wm_phy_resetisblocked(sc))
   4571 		return -1;
   4572 
   4573 	sc->phy.acquire(sc);
   4574 
   4575 	reg = CSR_READ(sc, WMREG_CTRL);
   4576 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4577 	CSR_WRITE_FLUSH(sc);
   4578 
   4579 	delay(sc->phy.reset_delay_us);
   4580 
   4581 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4582 	CSR_WRITE_FLUSH(sc);
   4583 
   4584 	delay(150);
   4585 
   4586 	sc->phy.release(sc);
   4587 
   4588 	wm_get_cfg_done(sc);
   4589 	wm_phy_post_reset(sc);
   4590 
   4591 	return 0;
   4592 }
   4593 
   4594 /*
    4595  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4596  * so it is enough to check sc->sc_queue[0] only.
   4597  */
   4598 static void
   4599 wm_flush_desc_rings(struct wm_softc *sc)
   4600 {
   4601 	pcireg_t preg;
   4602 	uint32_t reg;
   4603 	struct wm_txqueue *txq;
   4604 	wiseman_txdesc_t *txd;
   4605 	int nexttx;
   4606 	uint32_t rctl;
   4607 
   4608 	/* First, disable MULR fix in FEXTNVM11 */
   4609 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4610 	reg |= FEXTNVM11_DIS_MULRFIX;
   4611 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4612 
   4613 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4614 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4615 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4616 		return;
   4617 
   4618 	/* TX */
   4619 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4620 	    preg, reg);
   4621 	reg = CSR_READ(sc, WMREG_TCTL);
   4622 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4623 
   4624 	txq = &sc->sc_queue[0].wmq_txq;
   4625 	nexttx = txq->txq_next;
   4626 	txd = &txq->txq_descs[nexttx];
   4627 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4628 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4629 	txd->wtx_fields.wtxu_status = 0;
   4630 	txd->wtx_fields.wtxu_options = 0;
   4631 	txd->wtx_fields.wtxu_vlan = 0;
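
         	/*
         	 * A hedged note on the trick above (mirroring what Linux
         	 * e1000e appears to do): the dummy descriptor carries a legal
         	 * 512-byte length with IFCS set and points at the ring memory
         	 * itself, so it never transmits real data; bumping TDT below
         	 * simply lets the stuck DMA transaction complete so the flush
         	 * request can clear.
         	 */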
   4632 
   4633 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4634 	    BUS_SPACE_BARRIER_WRITE);
   4635 
   4636 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4637 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4638 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4639 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4640 	delay(250);
   4641 
   4642 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4643 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4644 		return;
   4645 
   4646 	/* RX */
   4647 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4648 	rctl = CSR_READ(sc, WMREG_RCTL);
   4649 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4650 	CSR_WRITE_FLUSH(sc);
   4651 	delay(150);
   4652 
   4653 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4654 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4655 	reg &= 0xffffc000;
   4656 	/*
   4657 	 * Update thresholds: prefetch threshold to 31, host threshold
   4658 	 * to 1 and make sure the granularity is "descriptors" and not
   4659 	 * "cache lines"
   4660 	 */
   4661 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4662 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4663 
   4664 	/* Momentarily enable the RX ring for the changes to take effect */
   4665 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4666 	CSR_WRITE_FLUSH(sc);
   4667 	delay(150);
   4668 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4669 }
   4670 
   4671 /*
   4672  * wm_reset:
   4673  *
    4674  *	Reset the chip to a known state.
   4675  */
   4676 static void
   4677 wm_reset(struct wm_softc *sc)
   4678 {
   4679 	int phy_reset = 0;
   4680 	int i, error = 0;
   4681 	uint32_t reg;
   4682 	uint16_t kmreg;
   4683 	int rv;
   4684 
   4685 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4686 		device_xname(sc->sc_dev), __func__));
   4687 	KASSERT(sc->sc_type != 0);
   4688 
   4689 	/*
   4690 	 * Allocate on-chip memory according to the MTU size.
   4691 	 * The Packet Buffer Allocation register must be written
   4692 	 * before the chip is reset.
   4693 	 */
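         	/*
         	 * Worked example of the split below (assuming the PBA_* macros
         	 * are in KB units and PBA_BYTE_SHIFT converts KB to bytes, as
         	 * their names suggest): an 82547 at standard MTU uses
         	 * sc_pba = PBA_30K, i.e. 30KB of the 40KB packet buffer for
         	 * RX, leaving (PBA_40K - PBA_30K) << PBA_BYTE_SHIFT = 10KB of
         	 * TX FIFO.
         	 */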
   4694 	switch (sc->sc_type) {
   4695 	case WM_T_82547:
   4696 	case WM_T_82547_2:
   4697 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4698 		    PBA_22K : PBA_30K;
   4699 		for (i = 0; i < sc->sc_nqueues; i++) {
   4700 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4701 			txq->txq_fifo_head = 0;
   4702 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4703 			txq->txq_fifo_size =
   4704 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4705 			txq->txq_fifo_stall = 0;
   4706 		}
   4707 		break;
   4708 	case WM_T_82571:
   4709 	case WM_T_82572:
    4710 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4711 	case WM_T_80003:
   4712 		sc->sc_pba = PBA_32K;
   4713 		break;
   4714 	case WM_T_82573:
   4715 		sc->sc_pba = PBA_12K;
   4716 		break;
   4717 	case WM_T_82574:
   4718 	case WM_T_82583:
   4719 		sc->sc_pba = PBA_20K;
   4720 		break;
   4721 	case WM_T_82576:
   4722 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4723 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4724 		break;
   4725 	case WM_T_82580:
   4726 	case WM_T_I350:
   4727 	case WM_T_I354:
   4728 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4729 		break;
   4730 	case WM_T_I210:
   4731 	case WM_T_I211:
   4732 		sc->sc_pba = PBA_34K;
   4733 		break;
   4734 	case WM_T_ICH8:
   4735 		/* Workaround for a bit corruption issue in FIFO memory */
   4736 		sc->sc_pba = PBA_8K;
   4737 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4738 		break;
   4739 	case WM_T_ICH9:
   4740 	case WM_T_ICH10:
   4741 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4742 		    PBA_14K : PBA_10K;
   4743 		break;
   4744 	case WM_T_PCH:
   4745 	case WM_T_PCH2:	/* XXX 14K? */
   4746 	case WM_T_PCH_LPT:
   4747 	case WM_T_PCH_SPT:
   4748 	case WM_T_PCH_CNP:
   4749 		sc->sc_pba = PBA_26K;
   4750 		break;
   4751 	default:
   4752 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4753 		    PBA_40K : PBA_48K;
   4754 		break;
   4755 	}
   4756 	/*
    4757 	 * Only old or non-multiqueue devices have the PBA register.
   4758 	 * XXX Need special handling for 82575.
   4759 	 */
   4760 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4761 	    || (sc->sc_type == WM_T_82575))
   4762 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4763 
   4764 	/* Prevent the PCI-E bus from sticking */
   4765 	if (sc->sc_flags & WM_F_PCIE) {
   4766 		int timeout = 800;
   4767 
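         		/*
         		 * The loop below polls STATUS_GIO_M_ENA in 100us steps,
         		 * so 800 iterations give the GIO master-disable
         		 * handshake up to 800 * 100us = 80ms to complete.
         		 */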
   4768 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4769 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4770 
   4771 		while (timeout--) {
   4772 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4773 			    == 0)
   4774 				break;
   4775 			delay(100);
   4776 		}
   4777 		if (timeout == 0)
   4778 			device_printf(sc->sc_dev,
   4779 			    "failed to disable busmastering\n");
   4780 	}
   4781 
    4782 	/* Set the completion timeout for the interface */
   4783 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4784 	    || (sc->sc_type == WM_T_82580)
   4785 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4786 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4787 		wm_set_pcie_completion_timeout(sc);
   4788 
   4789 	/* Clear interrupt */
   4790 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4791 	if (wm_is_using_msix(sc)) {
   4792 		if (sc->sc_type != WM_T_82574) {
   4793 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4794 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4795 		} else
   4796 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4797 	}
   4798 
   4799 	/* Stop the transmit and receive processes. */
   4800 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4801 	sc->sc_rctl &= ~RCTL_EN;
   4802 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4803 	CSR_WRITE_FLUSH(sc);
   4804 
   4805 	/* XXX set_tbi_sbp_82543() */
   4806 
   4807 	delay(10*1000);
   4808 
   4809 	/* Must acquire the MDIO ownership before MAC reset */
   4810 	switch (sc->sc_type) {
   4811 	case WM_T_82573:
   4812 	case WM_T_82574:
   4813 	case WM_T_82583:
   4814 		error = wm_get_hw_semaphore_82573(sc);
   4815 		break;
   4816 	default:
   4817 		break;
   4818 	}
   4819 
   4820 	/*
   4821 	 * 82541 Errata 29? & 82547 Errata 28?
   4822 	 * See also the description about PHY_RST bit in CTRL register
   4823 	 * in 8254x_GBe_SDM.pdf.
   4824 	 */
   4825 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4826 		CSR_WRITE(sc, WMREG_CTRL,
   4827 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4828 		CSR_WRITE_FLUSH(sc);
   4829 		delay(5000);
   4830 	}
   4831 
   4832 	switch (sc->sc_type) {
   4833 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4834 	case WM_T_82541:
   4835 	case WM_T_82541_2:
   4836 	case WM_T_82547:
   4837 	case WM_T_82547_2:
   4838 		/*
   4839 		 * On some chipsets, a reset through a memory-mapped write
   4840 		 * cycle can cause the chip to reset before completing the
   4841 		 * write cycle. This causes major headache that can be avoided
   4842 		 * by issuing the reset via indirect register writes through
   4843 		 * I/O space.
   4844 		 *
   4845 		 * So, if we successfully mapped the I/O BAR at attach time,
   4846 		 * use that. Otherwise, try our luck with a memory-mapped
   4847 		 * reset.
   4848 		 */
   4849 		if (sc->sc_flags & WM_F_IOH_VALID)
   4850 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4851 		else
   4852 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4853 		break;
   4854 	case WM_T_82545_3:
   4855 	case WM_T_82546_3:
   4856 		/* Use the shadow control register on these chips. */
   4857 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4858 		break;
   4859 	case WM_T_80003:
   4860 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4861 		sc->phy.acquire(sc);
   4862 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4863 		sc->phy.release(sc);
   4864 		break;
   4865 	case WM_T_ICH8:
   4866 	case WM_T_ICH9:
   4867 	case WM_T_ICH10:
   4868 	case WM_T_PCH:
   4869 	case WM_T_PCH2:
   4870 	case WM_T_PCH_LPT:
   4871 	case WM_T_PCH_SPT:
   4872 	case WM_T_PCH_CNP:
   4873 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4874 		if (wm_phy_resetisblocked(sc) == false) {
   4875 			/*
   4876 			 * Gate automatic PHY configuration by hardware on
   4877 			 * non-managed 82579
   4878 			 */
   4879 			if ((sc->sc_type == WM_T_PCH2)
   4880 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4881 				== 0))
   4882 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4883 
   4884 			reg |= CTRL_PHY_RESET;
   4885 			phy_reset = 1;
   4886 		} else
   4887 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4888 		sc->phy.acquire(sc);
   4889 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4890 		/* Don't insert a completion barrier during reset */
   4891 		delay(20*1000);
   4892 		mutex_exit(sc->sc_ich_phymtx);
   4893 		break;
   4894 	case WM_T_82580:
   4895 	case WM_T_I350:
   4896 	case WM_T_I354:
   4897 	case WM_T_I210:
   4898 	case WM_T_I211:
   4899 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4900 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4901 			CSR_WRITE_FLUSH(sc);
   4902 		delay(5000);
   4903 		break;
   4904 	case WM_T_82542_2_0:
   4905 	case WM_T_82542_2_1:
   4906 	case WM_T_82543:
   4907 	case WM_T_82540:
   4908 	case WM_T_82545:
   4909 	case WM_T_82546:
   4910 	case WM_T_82571:
   4911 	case WM_T_82572:
   4912 	case WM_T_82573:
   4913 	case WM_T_82574:
   4914 	case WM_T_82575:
   4915 	case WM_T_82576:
   4916 	case WM_T_82583:
   4917 	default:
   4918 		/* Everything else can safely use the documented method. */
   4919 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4920 		break;
   4921 	}
   4922 
   4923 	/* Must release the MDIO ownership after MAC reset */
   4924 	switch (sc->sc_type) {
   4925 	case WM_T_82573:
   4926 	case WM_T_82574:
   4927 	case WM_T_82583:
   4928 		if (error == 0)
   4929 			wm_put_hw_semaphore_82573(sc);
   4930 		break;
   4931 	default:
   4932 		break;
   4933 	}
   4934 
   4935 	/* Set Phy Config Counter to 50msec */
   4936 	if (sc->sc_type == WM_T_PCH2) {
   4937 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4938 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4939 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4940 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4941 	}
   4942 
   4943 	if (phy_reset != 0)
   4944 		wm_get_cfg_done(sc);
   4945 
   4946 	/* Reload EEPROM */
   4947 	switch (sc->sc_type) {
   4948 	case WM_T_82542_2_0:
   4949 	case WM_T_82542_2_1:
   4950 	case WM_T_82543:
   4951 	case WM_T_82544:
   4952 		delay(10);
   4953 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4954 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4955 		CSR_WRITE_FLUSH(sc);
   4956 		delay(2000);
   4957 		break;
   4958 	case WM_T_82540:
   4959 	case WM_T_82545:
   4960 	case WM_T_82545_3:
   4961 	case WM_T_82546:
   4962 	case WM_T_82546_3:
   4963 		delay(5*1000);
   4964 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4965 		break;
   4966 	case WM_T_82541:
   4967 	case WM_T_82541_2:
   4968 	case WM_T_82547:
   4969 	case WM_T_82547_2:
   4970 		delay(20000);
   4971 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4972 		break;
   4973 	case WM_T_82571:
   4974 	case WM_T_82572:
   4975 	case WM_T_82573:
   4976 	case WM_T_82574:
   4977 	case WM_T_82583:
   4978 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4979 			delay(10);
   4980 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4981 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4982 			CSR_WRITE_FLUSH(sc);
   4983 		}
   4984 		/* check EECD_EE_AUTORD */
   4985 		wm_get_auto_rd_done(sc);
   4986 		/*
    4987 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   4988 		 * is set.
   4989 		 */
   4990 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4991 		    || (sc->sc_type == WM_T_82583))
   4992 			delay(25*1000);
   4993 		break;
   4994 	case WM_T_82575:
   4995 	case WM_T_82576:
   4996 	case WM_T_82580:
   4997 	case WM_T_I350:
   4998 	case WM_T_I354:
   4999 	case WM_T_I210:
   5000 	case WM_T_I211:
   5001 	case WM_T_80003:
   5002 		/* check EECD_EE_AUTORD */
   5003 		wm_get_auto_rd_done(sc);
   5004 		break;
   5005 	case WM_T_ICH8:
   5006 	case WM_T_ICH9:
   5007 	case WM_T_ICH10:
   5008 	case WM_T_PCH:
   5009 	case WM_T_PCH2:
   5010 	case WM_T_PCH_LPT:
   5011 	case WM_T_PCH_SPT:
   5012 	case WM_T_PCH_CNP:
   5013 		break;
   5014 	default:
   5015 		panic("%s: unknown type\n", __func__);
   5016 	}
   5017 
   5018 	/* Check whether EEPROM is present or not */
   5019 	switch (sc->sc_type) {
   5020 	case WM_T_82575:
   5021 	case WM_T_82576:
   5022 	case WM_T_82580:
   5023 	case WM_T_I350:
   5024 	case WM_T_I354:
   5025 	case WM_T_ICH8:
   5026 	case WM_T_ICH9:
   5027 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5028 			/* Not found */
   5029 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5030 			if (sc->sc_type == WM_T_82575)
   5031 				wm_reset_init_script_82575(sc);
   5032 		}
   5033 		break;
   5034 	default:
   5035 		break;
   5036 	}
   5037 
   5038 	if (phy_reset != 0)
   5039 		wm_phy_post_reset(sc);
   5040 
   5041 	if ((sc->sc_type == WM_T_82580)
   5042 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5043 		/* Clear global device reset status bit */
   5044 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5045 	}
   5046 
   5047 	/* Clear any pending interrupt events. */
   5048 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5049 	reg = CSR_READ(sc, WMREG_ICR);
   5050 	if (wm_is_using_msix(sc)) {
   5051 		if (sc->sc_type != WM_T_82574) {
   5052 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5053 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5054 		} else
   5055 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5056 	}
   5057 
   5058 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5059 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5060 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5061 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5062 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5063 		reg |= KABGTXD_BGSQLBIAS;
   5064 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5065 	}
   5066 
   5067 	/* Reload sc_ctrl */
   5068 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5069 
   5070 	wm_set_eee(sc);
   5071 
   5072 	/*
   5073 	 * For PCH, this write will make sure that any noise will be detected
   5074 	 * as a CRC error and be dropped rather than show up as a bad packet
    5075 	 * to the DMA engine.
   5076 	 */
   5077 	if (sc->sc_type == WM_T_PCH)
   5078 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5079 
   5080 	if (sc->sc_type >= WM_T_82544)
   5081 		CSR_WRITE(sc, WMREG_WUC, 0);
   5082 
   5083 	if (sc->sc_type < WM_T_82575)
   5084 		wm_disable_aspm(sc); /* Workaround for some chips */
   5085 
   5086 	wm_reset_mdicnfg_82580(sc);
   5087 
   5088 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5089 		wm_pll_workaround_i210(sc);
   5090 
   5091 	if (sc->sc_type == WM_T_80003) {
   5092 		/* Default to TRUE to enable the MDIC W/A */
   5093 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5094 
   5095 		rv = wm_kmrn_readreg(sc,
   5096 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5097 		if (rv == 0) {
   5098 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5099 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5100 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5101 			else
   5102 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5103 		}
   5104 	}
   5105 }
   5106 
   5107 /*
   5108  * wm_add_rxbuf:
   5109  *
    5110  *	Add a receive buffer to the indicated descriptor.
   5111  */
   5112 static int
   5113 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5114 {
   5115 	struct wm_softc *sc = rxq->rxq_sc;
   5116 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5117 	struct mbuf *m;
   5118 	int error;
   5119 
   5120 	KASSERT(mutex_owned(rxq->rxq_lock));
   5121 
   5122 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5123 	if (m == NULL)
   5124 		return ENOBUFS;
   5125 
   5126 	MCLGET(m, M_DONTWAIT);
   5127 	if ((m->m_flags & M_EXT) == 0) {
   5128 		m_freem(m);
   5129 		return ENOBUFS;
   5130 	}
   5131 
   5132 	if (rxs->rxs_mbuf != NULL)
   5133 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5134 
   5135 	rxs->rxs_mbuf = m;
   5136 
   5137 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5138 	/*
   5139 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5140 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5141 	 */
   5142 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5143 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5144 	if (error) {
   5145 		/* XXX XXX XXX */
   5146 		aprint_error_dev(sc->sc_dev,
   5147 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5148 		panic("wm_add_rxbuf");
   5149 	}
   5150 
   5151 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5152 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5153 
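         	/*
         	 * For NEWQUEUE (82575 and later) chips, the descriptor must
         	 * only be initialized once the receiver is enabled; see the
         	 * note about RCTL.EN and wm_set_filter() in wm_init_locked().
         	 */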
   5154 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5155 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5156 			wm_init_rxdesc(rxq, idx);
   5157 	} else
   5158 		wm_init_rxdesc(rxq, idx);
   5159 
   5160 	return 0;
   5161 }
   5162 
   5163 /*
   5164  * wm_rxdrain:
   5165  *
   5166  *	Drain the receive queue.
   5167  */
   5168 static void
   5169 wm_rxdrain(struct wm_rxqueue *rxq)
   5170 {
   5171 	struct wm_softc *sc = rxq->rxq_sc;
   5172 	struct wm_rxsoft *rxs;
   5173 	int i;
   5174 
   5175 	KASSERT(mutex_owned(rxq->rxq_lock));
   5176 
   5177 	for (i = 0; i < WM_NRXDESC; i++) {
   5178 		rxs = &rxq->rxq_soft[i];
   5179 		if (rxs->rxs_mbuf != NULL) {
   5180 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5181 			m_freem(rxs->rxs_mbuf);
   5182 			rxs->rxs_mbuf = NULL;
   5183 		}
   5184 	}
   5185 }
   5186 
   5187 /*
   5188  * Setup registers for RSS.
   5189  *
    5190  * XXX VMDq is not supported yet.
   5191  */
   5192 static void
   5193 wm_init_rss(struct wm_softc *sc)
   5194 {
   5195 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5196 	int i;
   5197 
   5198 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5199 
   5200 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5201 		unsigned int qid, reta_ent;
   5202 
   5203 		qid  = i % sc->sc_nqueues;
   5204 		switch (sc->sc_type) {
   5205 		case WM_T_82574:
   5206 			reta_ent = __SHIFTIN(qid,
   5207 			    RETA_ENT_QINDEX_MASK_82574);
   5208 			break;
   5209 		case WM_T_82575:
   5210 			reta_ent = __SHIFTIN(qid,
   5211 			    RETA_ENT_QINDEX1_MASK_82575);
   5212 			break;
   5213 		default:
   5214 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5215 			break;
   5216 		}
   5217 
   5218 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5219 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5220 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5221 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5222 	}
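
         	/*
         	 * At this point the redirection table spreads flows
         	 * round-robin: for example, with sc_nqueues == 4, RETA entry
         	 * i selects queue i % 4, so hash values landing on entries
         	 * 0, 4, 8, ... are all steered to queue 0.
         	 */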
   5223 
   5224 	rss_getkey((uint8_t *)rss_key);
   5225 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5226 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5227 
   5228 	if (sc->sc_type == WM_T_82574)
   5229 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5230 	else
   5231 		mrqc = MRQC_ENABLE_RSS_MQ;
   5232 
   5233 	/*
    5234 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5235 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5236 	 */
   5237 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5238 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5239 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5240 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5241 
   5242 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5243 }
   5244 
   5245 /*
    5246  * Adjust the TX and RX queue numbers which the system actually uses.
    5247  *
    5248  * The numbers are affected by the parameters below:
    5249  *     - The number of hardware queues
   5250  *     - The number of MSI-X vectors (= "nvectors" argument)
   5251  *     - ncpu
   5252  */
   5253 static void
   5254 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5255 {
   5256 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5257 
   5258 	if (nvectors < 2) {
   5259 		sc->sc_nqueues = 1;
   5260 		return;
   5261 	}
   5262 
   5263 	switch (sc->sc_type) {
   5264 	case WM_T_82572:
   5265 		hw_ntxqueues = 2;
   5266 		hw_nrxqueues = 2;
   5267 		break;
   5268 	case WM_T_82574:
   5269 		hw_ntxqueues = 2;
   5270 		hw_nrxqueues = 2;
   5271 		break;
   5272 	case WM_T_82575:
   5273 		hw_ntxqueues = 4;
   5274 		hw_nrxqueues = 4;
   5275 		break;
   5276 	case WM_T_82576:
   5277 		hw_ntxqueues = 16;
   5278 		hw_nrxqueues = 16;
   5279 		break;
   5280 	case WM_T_82580:
   5281 	case WM_T_I350:
   5282 	case WM_T_I354:
   5283 		hw_ntxqueues = 8;
   5284 		hw_nrxqueues = 8;
   5285 		break;
   5286 	case WM_T_I210:
   5287 		hw_ntxqueues = 4;
   5288 		hw_nrxqueues = 4;
   5289 		break;
   5290 	case WM_T_I211:
   5291 		hw_ntxqueues = 2;
   5292 		hw_nrxqueues = 2;
   5293 		break;
   5294 		/*
    5295 		 * As the Ethernet controllers below do not support MSI-X,
    5296 		 * this driver does not use multiqueue on them.
   5297 		 *     - WM_T_80003
   5298 		 *     - WM_T_ICH8
   5299 		 *     - WM_T_ICH9
   5300 		 *     - WM_T_ICH10
   5301 		 *     - WM_T_PCH
   5302 		 *     - WM_T_PCH2
   5303 		 *     - WM_T_PCH_LPT
   5304 		 */
   5305 	default:
   5306 		hw_ntxqueues = 1;
   5307 		hw_nrxqueues = 1;
   5308 		break;
   5309 	}
   5310 
   5311 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5312 
   5313 	/*
    5314 	 * As more queues than MSI-X vectors cannot improve scaling, we
    5315 	 * limit the number of queues actually used.
   5316 	 */
   5317 	if (nvectors < hw_nqueues + 1)
   5318 		sc->sc_nqueues = nvectors - 1;
   5319 	else
   5320 		sc->sc_nqueues = hw_nqueues;
   5321 
   5322 	/*
    5323 	 * As more queues than CPUs cannot improve scaling, we limit
    5324 	 * the number of queues actually used.
   5325 	 */
   5326 	if (ncpu < sc->sc_nqueues)
   5327 		sc->sc_nqueues = ncpu;
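
         	/*
         	 * Worked example with hypothetical numbers: an 82576 offers
         	 * 16 queue pairs. With nvectors = 5, one vector is reserved
         	 * for the link interrupt, so sc_nqueues is first clipped to
         	 * 4; on a 2-CPU machine it is then clipped again to ncpu = 2.
         	 */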
   5328 }
   5329 
   5330 static inline bool
   5331 wm_is_using_msix(struct wm_softc *sc)
   5332 {
   5333 
   5334 	return (sc->sc_nintrs > 1);
   5335 }
   5336 
   5337 static inline bool
   5338 wm_is_using_multiqueue(struct wm_softc *sc)
   5339 {
   5340 
   5341 	return (sc->sc_nqueues > 1);
   5342 }
   5343 
   5344 static int
   5345 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5346 {
   5347 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5348 	wmq->wmq_id = qidx;
   5349 	wmq->wmq_intr_idx = intr_idx;
   5350 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5351 #ifdef WM_MPSAFE
   5352 	    | SOFTINT_MPSAFE
   5353 #endif
   5354 	    , wm_handle_queue, wmq);
   5355 	if (wmq->wmq_si != NULL)
   5356 		return 0;
   5357 
   5358 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5359 	    wmq->wmq_id);
   5360 
   5361 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5362 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5363 	return ENOMEM;
   5364 }
   5365 
   5366 /*
   5367  * Both single interrupt MSI and INTx can use this function.
   5368  */
   5369 static int
   5370 wm_setup_legacy(struct wm_softc *sc)
   5371 {
   5372 	pci_chipset_tag_t pc = sc->sc_pc;
   5373 	const char *intrstr = NULL;
   5374 	char intrbuf[PCI_INTRSTR_LEN];
   5375 	int error;
   5376 
   5377 	error = wm_alloc_txrx_queues(sc);
   5378 	if (error) {
   5379 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5380 		    error);
   5381 		return ENOMEM;
   5382 	}
   5383 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5384 	    sizeof(intrbuf));
   5385 #ifdef WM_MPSAFE
   5386 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5387 #endif
   5388 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5389 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5390 	if (sc->sc_ihs[0] == NULL) {
   5391 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5392 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5393 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5394 		return ENOMEM;
   5395 	}
   5396 
   5397 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5398 	sc->sc_nintrs = 1;
   5399 
   5400 	return wm_softint_establish(sc, 0, 0);
   5401 }
   5402 
   5403 static int
   5404 wm_setup_msix(struct wm_softc *sc)
   5405 {
   5406 	void *vih;
   5407 	kcpuset_t *affinity;
   5408 	int qidx, error, intr_idx, txrx_established;
   5409 	pci_chipset_tag_t pc = sc->sc_pc;
   5410 	const char *intrstr = NULL;
   5411 	char intrbuf[PCI_INTRSTR_LEN];
   5412 	char intr_xname[INTRDEVNAMEBUF];
   5413 
   5414 	if (sc->sc_nqueues < ncpu) {
   5415 		/*
    5416 		 * To avoid other devices' interrupts, the Tx/Rx interrupt
    5417 		 * affinities start at CPU#1.
   5418 		 */
   5419 		sc->sc_affinity_offset = 1;
   5420 	} else {
   5421 		/*
    5422 		 * In this case, this device uses all CPUs. For readability, we
    5423 		 * unify the affinitized cpu_index with the MSI-X vector number.
   5424 		 */
   5425 		sc->sc_affinity_offset = 0;
   5426 	}
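
         	/*
         	 * For example, in a hypothetical 4-queue, 8-CPU configuration,
         	 * sc_affinity_offset is 1, so the TX/RX vectors below are
         	 * pinned round-robin to CPU#1..CPU#4 via
         	 * (sc_affinity_offset + intr_idx) % ncpu, leaving CPU#0 for
         	 * other devices' interrupts.
         	 */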
   5427 
   5428 	error = wm_alloc_txrx_queues(sc);
   5429 	if (error) {
   5430 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5431 		    error);
   5432 		return ENOMEM;
   5433 	}
   5434 
   5435 	kcpuset_create(&affinity, false);
   5436 	intr_idx = 0;
   5437 
   5438 	/*
   5439 	 * TX and RX
   5440 	 */
   5441 	txrx_established = 0;
   5442 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5443 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5444 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5445 
   5446 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5447 		    sizeof(intrbuf));
   5448 #ifdef WM_MPSAFE
   5449 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5450 		    PCI_INTR_MPSAFE, true);
   5451 #endif
   5452 		memset(intr_xname, 0, sizeof(intr_xname));
   5453 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5454 		    device_xname(sc->sc_dev), qidx);
   5455 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5456 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5457 		if (vih == NULL) {
   5458 			aprint_error_dev(sc->sc_dev,
   5459 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5460 			    intrstr ? " at " : "",
   5461 			    intrstr ? intrstr : "");
   5462 
   5463 			goto fail;
   5464 		}
   5465 		kcpuset_zero(affinity);
   5466 		/* Round-robin affinity */
   5467 		kcpuset_set(affinity, affinity_to);
   5468 		error = interrupt_distribute(vih, affinity, NULL);
   5469 		if (error == 0) {
   5470 			aprint_normal_dev(sc->sc_dev,
   5471 			    "for TX and RX interrupting at %s affinity to %u\n",
   5472 			    intrstr, affinity_to);
   5473 		} else {
   5474 			aprint_normal_dev(sc->sc_dev,
   5475 			    "for TX and RX interrupting at %s\n", intrstr);
   5476 		}
   5477 		sc->sc_ihs[intr_idx] = vih;
   5478 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5479 			goto fail;
   5480 		txrx_established++;
   5481 		intr_idx++;
   5482 	}
   5483 
   5484 	/* LINK */
   5485 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5486 	    sizeof(intrbuf));
   5487 #ifdef WM_MPSAFE
   5488 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5489 #endif
   5490 	memset(intr_xname, 0, sizeof(intr_xname));
   5491 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5492 	    device_xname(sc->sc_dev));
   5493 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5494 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5495 	if (vih == NULL) {
   5496 		aprint_error_dev(sc->sc_dev,
   5497 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5498 		    intrstr ? " at " : "",
   5499 		    intrstr ? intrstr : "");
   5500 
   5501 		goto fail;
   5502 	}
   5503 	/* Keep default affinity to LINK interrupt */
   5504 	aprint_normal_dev(sc->sc_dev,
   5505 	    "for LINK interrupting at %s\n", intrstr);
   5506 	sc->sc_ihs[intr_idx] = vih;
   5507 	sc->sc_link_intr_idx = intr_idx;
   5508 
   5509 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5510 	kcpuset_destroy(affinity);
   5511 	return 0;
   5512 
   5513  fail:
   5514 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5515 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5516 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5517 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5518 	}
   5519 
   5520 	kcpuset_destroy(affinity);
   5521 	return ENOMEM;
   5522 }
   5523 
   5524 static void
   5525 wm_unset_stopping_flags(struct wm_softc *sc)
   5526 {
   5527 	int i;
   5528 
   5529 	KASSERT(WM_CORE_LOCKED(sc));
   5530 
   5531 	/* Must unset stopping flags in ascending order. */
   5532 	for (i = 0; i < sc->sc_nqueues; i++) {
   5533 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5534 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5535 
   5536 		mutex_enter(txq->txq_lock);
   5537 		txq->txq_stopping = false;
   5538 		mutex_exit(txq->txq_lock);
   5539 
   5540 		mutex_enter(rxq->rxq_lock);
   5541 		rxq->rxq_stopping = false;
   5542 		mutex_exit(rxq->rxq_lock);
   5543 	}
   5544 
   5545 	sc->sc_core_stopping = false;
   5546 }
   5547 
   5548 static void
   5549 wm_set_stopping_flags(struct wm_softc *sc)
   5550 {
   5551 	int i;
   5552 
   5553 	KASSERT(WM_CORE_LOCKED(sc));
   5554 
   5555 	sc->sc_core_stopping = true;
   5556 
   5557 	/* Must set stopping flags in ascending order. */
   5558 	for (i = 0; i < sc->sc_nqueues; i++) {
   5559 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5560 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5561 
   5562 		mutex_enter(rxq->rxq_lock);
   5563 		rxq->rxq_stopping = true;
   5564 		mutex_exit(rxq->rxq_lock);
   5565 
   5566 		mutex_enter(txq->txq_lock);
   5567 		txq->txq_stopping = true;
   5568 		mutex_exit(txq->txq_lock);
   5569 	}
   5570 }
   5571 
   5572 /*
   5573  * Write interrupt interval value to ITR or EITR
   5574  */
   5575 static void
   5576 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5577 {
   5578 
   5579 	if (!wmq->wmq_set_itr)
   5580 		return;
   5581 
   5582 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5583 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5584 
   5585 		/*
    5586 		 * The 82575 doesn't have the CNT_INGR field,
    5587 		 * so overwrite the counter field in software.
   5588 		 */
   5589 		if (sc->sc_type == WM_T_82575)
   5590 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5591 		else
   5592 			eitr |= EITR_CNT_INGR;
   5593 
   5594 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5595 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5596 		/*
    5597 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5598 		 * the multiqueue function with MSI-X.
   5599 		 */
   5600 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5601 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5602 	} else {
   5603 		KASSERT(wmq->wmq_id == 0);
   5604 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5605 	}
   5606 
   5607 	wmq->wmq_set_itr = false;
   5608 }
   5609 
   5610 /*
   5611  * TODO
    5612  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5613  * however, it does not fit wm(4), so AIM remains disabled until we
    5614  * find an appropriate ITR calculation.
   5615  */
   5616 /*
    5617  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5618  * write to the register. This function does not write ITR/EITR itself.
   5619  */
   5620 static void
   5621 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5622 {
   5623 #ifdef NOTYET
   5624 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5625 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5626 	uint32_t avg_size = 0;
   5627 	uint32_t new_itr;
   5628 
   5629 	if (rxq->rxq_packets)
   5630 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5631 	if (txq->txq_packets)
   5632 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5633 
   5634 	if (avg_size == 0) {
   5635 		new_itr = 450; /* restore default value */
   5636 		goto out;
   5637 	}
   5638 
   5639 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5640 	avg_size += 24;
   5641 
   5642 	/* Don't starve jumbo frames */
   5643 	avg_size = uimin(avg_size, 3000);
   5644 
   5645 	/* Give a little boost to mid-size frames */
   5646 	if ((avg_size > 300) && (avg_size < 1200))
   5647 		new_itr = avg_size / 3;
   5648 	else
   5649 		new_itr = avg_size / 2;
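
         	/*
         	 * Worked example of the heuristic above: an average packet of
         	 * 1000 bytes becomes 1024 after the 24-byte framing overhead;
         	 * that falls in the mid-size (300, 1200) band, so
         	 * new_itr = 1024 / 3 = 341 before the *4 scaling below.
         	 */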
   5650 
   5651 out:
   5652 	/*
    5653 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5654 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5655 	 */
   5656 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5657 		new_itr *= 4;
   5658 
   5659 	if (new_itr != wmq->wmq_itr) {
   5660 		wmq->wmq_itr = new_itr;
   5661 		wmq->wmq_set_itr = true;
   5662 	} else
   5663 		wmq->wmq_set_itr = false;
   5664 
   5665 	rxq->rxq_packets = 0;
   5666 	rxq->rxq_bytes = 0;
   5667 	txq->txq_packets = 0;
   5668 	txq->txq_bytes = 0;
   5669 #endif
   5670 }
   5671 
   5672 /*
   5673  * wm_init:		[ifnet interface function]
   5674  *
   5675  *	Initialize the interface.
   5676  */
   5677 static int
   5678 wm_init(struct ifnet *ifp)
   5679 {
   5680 	struct wm_softc *sc = ifp->if_softc;
   5681 	int ret;
   5682 
   5683 	WM_CORE_LOCK(sc);
   5684 	ret = wm_init_locked(ifp);
   5685 	WM_CORE_UNLOCK(sc);
   5686 
   5687 	return ret;
   5688 }
   5689 
   5690 static int
   5691 wm_init_locked(struct ifnet *ifp)
   5692 {
   5693 	struct wm_softc *sc = ifp->if_softc;
   5694 	struct ethercom *ec = &sc->sc_ethercom;
   5695 	int i, j, trynum, error = 0;
   5696 	uint32_t reg;
   5697 
   5698 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5699 		device_xname(sc->sc_dev), __func__));
   5700 	KASSERT(WM_CORE_LOCKED(sc));
   5701 
   5702 	/*
    5703 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5704 	 * There is a small but measurable benefit to avoiding the adjustment
   5705 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5706 	 * on such platforms.  One possibility is that the DMA itself is
   5707 	 * slightly more efficient if the front of the entire packet (instead
   5708 	 * of the front of the headers) is aligned.
   5709 	 *
   5710 	 * Note we must always set align_tweak to 0 if we are using
   5711 	 * jumbo frames.
   5712 	 */
   5713 #ifdef __NO_STRICT_ALIGNMENT
   5714 	sc->sc_align_tweak = 0;
   5715 #else
   5716 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5717 		sc->sc_align_tweak = 0;
   5718 	else
   5719 		sc->sc_align_tweak = 2;
   5720 #endif /* __NO_STRICT_ALIGNMENT */
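
         	/*
         	 * Example of the tweak above: for a normal 1500-byte MTU,
         	 * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518 fits in
         	 * MCLBYTES - 2 (assuming the usual 2KB cluster), so
         	 * sc_align_tweak is 2; shifting the buffer by two bytes makes
         	 * the IP header, which follows the 14-byte Ethernet header,
         	 * start on a 4-byte boundary.
         	 */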
   5721 
   5722 	/* Cancel any pending I/O. */
   5723 	wm_stop_locked(ifp, 0);
   5724 
   5725 	/* Update statistics before reset */
   5726 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5727 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5728 
   5729 	/* PCH_SPT hardware workaround */
   5730 	if (sc->sc_type == WM_T_PCH_SPT)
   5731 		wm_flush_desc_rings(sc);
   5732 
   5733 	/* Reset the chip to a known state. */
   5734 	wm_reset(sc);
   5735 
   5736 	/*
    5737 	 * AMT-based hardware can now take control from the firmware.
   5738 	 * Do this after reset.
   5739 	 */
   5740 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5741 		wm_get_hw_control(sc);
   5742 
   5743 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5744 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5745 		wm_legacy_irq_quirk_spt(sc);
   5746 
   5747 	/* Init hardware bits */
   5748 	wm_initialize_hardware_bits(sc);
   5749 
   5750 	/* Reset the PHY. */
   5751 	if (sc->sc_flags & WM_F_HAS_MII)
   5752 		wm_gmii_reset(sc);
   5753 
   5754 	if (sc->sc_type >= WM_T_ICH8) {
   5755 		reg = CSR_READ(sc, WMREG_GCR);
   5756 		/*
   5757 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5758 		 * default after reset.
   5759 		 */
   5760 		if (sc->sc_type == WM_T_ICH8)
   5761 			reg |= GCR_NO_SNOOP_ALL;
   5762 		else
   5763 			reg &= ~GCR_NO_SNOOP_ALL;
   5764 		CSR_WRITE(sc, WMREG_GCR, reg);
   5765 	}
   5766 	if ((sc->sc_type >= WM_T_ICH8)
   5767 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5768 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
    5770 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5771 		reg |= CTRL_EXT_RO_DIS;
   5772 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5773 	}
   5774 
   5775 	/* Calculate (E)ITR value */
   5776 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5777 		/*
   5778 		 * For NEWQUEUE's EITR (except for 82575).
    5779 		 * 82575's EITR should be set to the same throttling value as
    5780 		 * other old controllers' ITR, because the interrupt/sec
    5781 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5782 		 *
    5783 		 * 82574's EITR should be set to the same throttling value as ITR.
    5784 		 *
    5785 		 * For N interrupts/sec, set this value to
    5786 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5787 		 */
   5788 		sc->sc_itr_init = 450;
   5789 	} else if (sc->sc_type >= WM_T_82543) {
   5790 		/*
   5791 		 * Set up the interrupt throttling register (units of 256ns)
   5792 		 * Note that a footnote in Intel's documentation says this
   5793 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5794 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5795 		 * that that is also true for the 1024ns units of the other
   5796 		 * interrupt-related timer registers -- so, really, we ought
   5797 		 * to divide this value by 4 when the link speed is low.
   5798 		 *
   5799 		 * XXX implement this division at link speed change!
   5800 		 */
   5801 
   5802 		/*
   5803 		 * For N interrupts/sec, set this value to:
   5804 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5805 		 * absolute and packet timer values to this value
   5806 		 * divided by 4 to get "simple timer" behavior.
   5807 		 */
   5808 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5809 	}
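
         	/*
         	 * Worked example, applying the formulas above: the legacy ITR
         	 * value 1500 in 256ns units spaces interrupts at least
         	 * 1500 * 256ns = 384us apart, i.e. about
         	 * 1,000,000,000 / (1500 * 256) = 2604 interrupts/sec, while
         	 * the NEWQUEUE EITR value 450 gives roughly
         	 * 1,000,000 / 450 = 2222 interrupts/sec.
         	 */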
   5810 
   5811 	error = wm_init_txrx_queues(sc);
   5812 	if (error)
   5813 		goto out;
   5814 
   5815 	/* Clear out the VLAN table -- we don't use it (yet). */
   5816 	CSR_WRITE(sc, WMREG_VET, 0);
   5817 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5818 		trynum = 10; /* Due to hw errata */
   5819 	else
   5820 		trynum = 1;
   5821 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5822 		for (j = 0; j < trynum; j++)
   5823 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5824 
   5825 	/*
   5826 	 * Set up flow-control parameters.
   5827 	 *
   5828 	 * XXX Values could probably stand some tuning.
   5829 	 */
   5830 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5831 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5832 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5833 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5834 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5835 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5836 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5837 	}
   5838 
   5839 	sc->sc_fcrtl = FCRTL_DFLT;
   5840 	if (sc->sc_type < WM_T_82543) {
   5841 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5842 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5843 	} else {
   5844 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5845 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5846 	}
   5847 
   5848 	if (sc->sc_type == WM_T_80003)
   5849 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5850 	else
   5851 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5852 
   5853 	/* Writes the control register. */
   5854 	wm_set_vlan(sc);
   5855 
   5856 	if (sc->sc_flags & WM_F_HAS_MII) {
   5857 		uint16_t kmreg;
   5858 
   5859 		switch (sc->sc_type) {
   5860 		case WM_T_80003:
   5861 		case WM_T_ICH8:
   5862 		case WM_T_ICH9:
   5863 		case WM_T_ICH10:
   5864 		case WM_T_PCH:
   5865 		case WM_T_PCH2:
   5866 		case WM_T_PCH_LPT:
   5867 		case WM_T_PCH_SPT:
   5868 		case WM_T_PCH_CNP:
   5869 			/*
   5870 			 * Set the mac to wait the maximum time between each
   5871 			 * iteration and increase the max iterations when
   5872 			 * polling the phy; this fixes erroneous timeouts at
   5873 			 * 10Mbps.
   5874 			 */
   5875 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5876 			    0xFFFF);
   5877 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5878 			    &kmreg);
   5879 			kmreg |= 0x3F;
   5880 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5881 			    kmreg);
   5882 			break;
   5883 		default:
   5884 			break;
   5885 		}
   5886 
   5887 		if (sc->sc_type == WM_T_80003) {
   5888 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5889 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5890 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5891 
    5892 			/* Bypass the RX and TX FIFOs */
   5893 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5894 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5895 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5896 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5897 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5898 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5899 		}
   5900 	}
   5901 #if 0
   5902 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5903 #endif
   5904 
   5905 	/* Set up checksum offload parameters. */
   5906 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5907 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5908 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5909 		reg |= RXCSUM_IPOFL;
   5910 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5911 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5912 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5913 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5914 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5915 
   5916 	/* Set registers about MSI-X */
   5917 	if (wm_is_using_msix(sc)) {
   5918 		uint32_t ivar, qintr_idx;
   5919 		struct wm_queue *wmq;
   5920 		unsigned int qid;
   5921 
   5922 		if (sc->sc_type == WM_T_82575) {
   5923 			/* Interrupt control */
   5924 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5925 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5926 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5927 
   5928 			/* TX and RX */
   5929 			for (i = 0; i < sc->sc_nqueues; i++) {
   5930 				wmq = &sc->sc_queue[i];
   5931 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5932 				    EITR_TX_QUEUE(wmq->wmq_id)
   5933 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5934 			}
   5935 			/* Link status */
   5936 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5937 			    EITR_OTHER);
   5938 		} else if (sc->sc_type == WM_T_82574) {
   5939 			/* Interrupt control */
   5940 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5941 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5942 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5943 
   5944 			/*
   5945 			 * Workaround issue with spurious interrupts
   5946 			 * in MSI-X mode.
    5947 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5948 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5949 			 */
   5950 			reg = CSR_READ(sc, WMREG_RFCTL);
   5951 			reg |= WMREG_RFCTL_ACKDIS;
   5952 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5953 
   5954 			ivar = 0;
   5955 			/* TX and RX */
   5956 			for (i = 0; i < sc->sc_nqueues; i++) {
   5957 				wmq = &sc->sc_queue[i];
   5958 				qid = wmq->wmq_id;
   5959 				qintr_idx = wmq->wmq_intr_idx;
   5960 
   5961 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5962 				    IVAR_TX_MASK_Q_82574(qid));
   5963 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5964 				    IVAR_RX_MASK_Q_82574(qid));
   5965 			}
   5966 			/* Link status */
   5967 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5968 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5969 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5970 		} else {
   5971 			/* Interrupt control */
   5972 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5973 			    | GPIE_EIAME | GPIE_PBA);
   5974 
   5975 			switch (sc->sc_type) {
   5976 			case WM_T_82580:
   5977 			case WM_T_I350:
   5978 			case WM_T_I354:
   5979 			case WM_T_I210:
   5980 			case WM_T_I211:
   5981 				/* TX and RX */
   5982 				for (i = 0; i < sc->sc_nqueues; i++) {
   5983 					wmq = &sc->sc_queue[i];
   5984 					qid = wmq->wmq_id;
   5985 					qintr_idx = wmq->wmq_intr_idx;
   5986 
   5987 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5988 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5989 					ivar |= __SHIFTIN((qintr_idx
   5990 						| IVAR_VALID),
   5991 					    IVAR_TX_MASK_Q(qid));
   5992 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5993 					ivar |= __SHIFTIN((qintr_idx
   5994 						| IVAR_VALID),
   5995 					    IVAR_RX_MASK_Q(qid));
   5996 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5997 				}
   5998 				break;
   5999 			case WM_T_82576:
   6000 				/* TX and RX */
   6001 				for (i = 0; i < sc->sc_nqueues; i++) {
   6002 					wmq = &sc->sc_queue[i];
   6003 					qid = wmq->wmq_id;
   6004 					qintr_idx = wmq->wmq_intr_idx;
   6005 
   6006 					ivar = CSR_READ(sc,
   6007 					    WMREG_IVAR_Q_82576(qid));
   6008 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6009 					ivar |= __SHIFTIN((qintr_idx
   6010 						| IVAR_VALID),
   6011 					    IVAR_TX_MASK_Q_82576(qid));
   6012 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6013 					ivar |= __SHIFTIN((qintr_idx
   6014 						| IVAR_VALID),
   6015 					    IVAR_RX_MASK_Q_82576(qid));
   6016 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6017 					    ivar);
   6018 				}
   6019 				break;
   6020 			default:
   6021 				break;
   6022 			}
   6023 
   6024 			/* Link status */
   6025 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6026 			    IVAR_MISC_OTHER);
   6027 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6028 		}
   6029 
   6030 		if (wm_is_using_multiqueue(sc)) {
   6031 			wm_init_rss(sc);
   6032 
   6033 			/*
    6034 			 * NOTE: Receive Full-Packet Checksum Offload
    6035 			 * is mutually exclusive with Multiqueue. However,
    6036 			 * this is not the same as TCP/IP checksums, which
    6037 			 * still work.
    6038 			 */
   6039 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6040 			reg |= RXCSUM_PCSD;
   6041 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6042 		}
   6043 	}
   6044 
   6045 	/* Set up the interrupt registers. */
   6046 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6047 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6048 	    ICR_RXO | ICR_RXT0;
   6049 	if (wm_is_using_msix(sc)) {
   6050 		uint32_t mask;
   6051 		struct wm_queue *wmq;
   6052 
   6053 		switch (sc->sc_type) {
   6054 		case WM_T_82574:
   6055 			mask = 0;
   6056 			for (i = 0; i < sc->sc_nqueues; i++) {
   6057 				wmq = &sc->sc_queue[i];
   6058 				mask |= ICR_TXQ(wmq->wmq_id);
   6059 				mask |= ICR_RXQ(wmq->wmq_id);
   6060 			}
   6061 			mask |= ICR_OTHER;
   6062 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6063 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6064 			break;
   6065 		default:
   6066 			if (sc->sc_type == WM_T_82575) {
   6067 				mask = 0;
   6068 				for (i = 0; i < sc->sc_nqueues; i++) {
   6069 					wmq = &sc->sc_queue[i];
   6070 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6071 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6072 				}
   6073 				mask |= EITR_OTHER;
   6074 			} else {
   6075 				mask = 0;
   6076 				for (i = 0; i < sc->sc_nqueues; i++) {
   6077 					wmq = &sc->sc_queue[i];
   6078 					mask |= 1 << wmq->wmq_intr_idx;
   6079 				}
   6080 				mask |= 1 << sc->sc_link_intr_idx;
   6081 			}
   6082 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6083 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6084 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6085 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6086 			break;
   6087 		}
   6088 	} else
   6089 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6090 
   6091 	/* Set up the inter-packet gap. */
   6092 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6093 
   6094 	if (sc->sc_type >= WM_T_82543) {
   6095 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6096 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6097 			wm_itrs_writereg(sc, wmq);
   6098 		}
   6099 		/*
    6100 		 * Link interrupts occur much less frequently than TX and
    6101 		 * RX interrupts, so we don't tune the
    6102 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6103 		 * if_igb does.
   6104 		 */
   6105 	}
   6106 
   6107 	/* Set the VLAN ethernetype. */
   6108 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6109 
   6110 	/*
   6111 	 * Set up the transmit control register; we start out with
    6112 	 * a collision distance suitable for FDX, but update it when
   6113 	 * we resolve the media type.
   6114 	 */
   6115 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6116 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6117 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6118 	if (sc->sc_type >= WM_T_82571)
   6119 		sc->sc_tctl |= TCTL_MULR;
   6120 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6121 
   6122 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6123 		/* Write TDT after TCTL.EN is set. See the document. */
   6124 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6125 	}
   6126 
   6127 	if (sc->sc_type == WM_T_80003) {
   6128 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6129 		reg &= ~TCTL_EXT_GCEX_MASK;
   6130 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6131 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6132 	}
   6133 
   6134 	/* Set the media. */
   6135 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6136 		goto out;
   6137 
   6138 	/* Configure for OS presence */
   6139 	wm_init_manageability(sc);
   6140 
   6141 	/*
   6142 	 * Set up the receive control register; we actually program the
   6143 	 * register when we set the receive filter. Use multicast address
   6144 	 * offset type 0.
   6145 	 *
   6146 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6147 	 * don't enable that feature.
   6148 	 */
   6149 	sc->sc_mchash_type = 0;
   6150 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6151 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6152 
    6153 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6154 	if (sc->sc_type == WM_T_82574)
   6155 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6156 
   6157 	/*
   6158 	 * The I350 has a bug where it always strips the CRC whether
    6159 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   6160 	 */
   6161 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6162 	    || (sc->sc_type == WM_T_I210))
   6163 		sc->sc_rctl |= RCTL_SECRC;
   6164 
   6165 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6166 	    && (ifp->if_mtu > ETHERMTU)) {
   6167 		sc->sc_rctl |= RCTL_LPE;
   6168 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6169 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6170 	}
   6171 
   6172 	if (MCLBYTES == 2048)
   6173 		sc->sc_rctl |= RCTL_2k;
   6174 	else {
   6175 		if (sc->sc_type >= WM_T_82543) {
   6176 			switch (MCLBYTES) {
   6177 			case 4096:
   6178 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6179 				break;
   6180 			case 8192:
   6181 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6182 				break;
   6183 			case 16384:
   6184 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6185 				break;
   6186 			default:
   6187 				panic("wm_init: MCLBYTES %d unsupported",
   6188 				    MCLBYTES);
   6189 				break;
   6190 			}
   6191 		} else
   6192 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6193 	}
   6194 
   6195 	/* Enable ECC */
   6196 	switch (sc->sc_type) {
   6197 	case WM_T_82571:
   6198 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6199 		reg |= PBA_ECC_CORR_EN;
   6200 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6201 		break;
   6202 	case WM_T_PCH_LPT:
   6203 	case WM_T_PCH_SPT:
   6204 	case WM_T_PCH_CNP:
   6205 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6206 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6207 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6208 
   6209 		sc->sc_ctrl |= CTRL_MEHE;
   6210 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6211 		break;
   6212 	default:
   6213 		break;
   6214 	}
   6215 
   6216 	/*
   6217 	 * Set the receive filter.
   6218 	 *
   6219 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6220 	 * the setting of RCTL.EN in wm_set_filter()
   6221 	 */
   6222 	wm_set_filter(sc);
   6223 
    6224 	/* On 82575 and later, set RDT only if RX is enabled. */
   6225 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6226 		int qidx;
   6227 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6228 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6229 			for (i = 0; i < WM_NRXDESC; i++) {
   6230 				mutex_enter(rxq->rxq_lock);
   6231 				wm_init_rxdesc(rxq, i);
   6232 				mutex_exit(rxq->rxq_lock);
   6234 			}
   6235 		}
   6236 	}
   6237 
   6238 	wm_unset_stopping_flags(sc);
   6239 
   6240 	/* Start the one second link check clock. */
   6241 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6242 
   6243 	/* ...all done! */
   6244 	ifp->if_flags |= IFF_RUNNING;
   6245 	ifp->if_flags &= ~IFF_OACTIVE;
   6246 
   6247  out:
   6248 	/* Save last flags for the callback */
   6249 	sc->sc_if_flags = ifp->if_flags;
   6250 	sc->sc_ec_capenable = ec->ec_capenable;
   6251 	if (error)
   6252 		log(LOG_ERR, "%s: interface not running\n",
   6253 		    device_xname(sc->sc_dev));
   6254 	return error;
   6255 }
   6256 
   6257 /*
   6258  * wm_stop:		[ifnet interface function]
   6259  *
   6260  *	Stop transmission on the interface.
   6261  */
   6262 static void
   6263 wm_stop(struct ifnet *ifp, int disable)
   6264 {
   6265 	struct wm_softc *sc = ifp->if_softc;
   6266 
   6267 	WM_CORE_LOCK(sc);
   6268 	wm_stop_locked(ifp, disable);
   6269 	WM_CORE_UNLOCK(sc);
   6270 }
   6271 
   6272 static void
   6273 wm_stop_locked(struct ifnet *ifp, int disable)
   6274 {
   6275 	struct wm_softc *sc = ifp->if_softc;
   6276 	struct wm_txsoft *txs;
   6277 	int i, qidx;
   6278 
   6279 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6280 		device_xname(sc->sc_dev), __func__));
   6281 	KASSERT(WM_CORE_LOCKED(sc));
   6282 
   6283 	wm_set_stopping_flags(sc);
   6284 
   6285 	/* Stop the one second clock. */
   6286 	callout_stop(&sc->sc_tick_ch);
   6287 
   6288 	/* Stop the 82547 Tx FIFO stall check timer. */
   6289 	if (sc->sc_type == WM_T_82547)
   6290 		callout_stop(&sc->sc_txfifo_ch);
   6291 
   6292 	if (sc->sc_flags & WM_F_HAS_MII) {
   6293 		/* Down the MII. */
   6294 		mii_down(&sc->sc_mii);
   6295 	} else {
   6296 #if 0
   6297 		/* Should we clear PHY's status properly? */
   6298 		wm_reset(sc);
   6299 #endif
   6300 	}
   6301 
   6302 	/* Stop the transmit and receive processes. */
   6303 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6304 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6305 	sc->sc_rctl &= ~RCTL_EN;
   6306 
   6307 	/*
   6308 	 * Clear the interrupt mask to ensure the device cannot assert its
   6309 	 * interrupt line.
   6310 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6311 	 * service any currently pending or shared interrupt.
   6312 	 */
   6313 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6314 	sc->sc_icr = 0;
   6315 	if (wm_is_using_msix(sc)) {
   6316 		if (sc->sc_type != WM_T_82574) {
   6317 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6318 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6319 		} else
   6320 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6321 	}
   6322 
   6323 	/* Release any queued transmit buffers. */
   6324 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6325 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6326 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6327 		mutex_enter(txq->txq_lock);
   6328 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6329 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6330 			txs = &txq->txq_soft[i];
   6331 			if (txs->txs_mbuf != NULL) {
    6332 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6333 				m_freem(txs->txs_mbuf);
   6334 				txs->txs_mbuf = NULL;
   6335 			}
   6336 		}
   6337 		mutex_exit(txq->txq_lock);
   6338 	}
   6339 
   6340 	/* Mark the interface as down and cancel the watchdog timer. */
   6341 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6342 
   6343 	if (disable) {
   6344 		for (i = 0; i < sc->sc_nqueues; i++) {
   6345 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6346 			mutex_enter(rxq->rxq_lock);
   6347 			wm_rxdrain(rxq);
   6348 			mutex_exit(rxq->rxq_lock);
   6349 		}
   6350 	}
   6351 
   6352 #if 0 /* notyet */
   6353 	if (sc->sc_type >= WM_T_82544)
   6354 		CSR_WRITE(sc, WMREG_WUC, 0);
   6355 #endif
   6356 }
   6357 
   6358 static void
   6359 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6360 {
   6361 	struct mbuf *m;
   6362 	int i;
   6363 
   6364 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6365 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6366 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6367 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6368 		    m->m_data, m->m_len, m->m_flags);
   6369 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6370 	    i, i == 1 ? "" : "s");
   6371 }
   6372 
   6373 /*
   6374  * wm_82547_txfifo_stall:
   6375  *
   6376  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6377  *	reset the FIFO pointers, and restart packet transmission.
   6378  */
   6379 static void
   6380 wm_82547_txfifo_stall(void *arg)
   6381 {
   6382 	struct wm_softc *sc = arg;
   6383 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6384 
   6385 	mutex_enter(txq->txq_lock);
   6386 
   6387 	if (txq->txq_stopping)
   6388 		goto out;
   6389 
   6390 	if (txq->txq_fifo_stall) {
   6391 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6392 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6393 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6394 			/*
   6395 			 * Packets have drained.  Stop transmitter, reset
   6396 			 * FIFO pointers, restart transmitter, and kick
   6397 			 * the packet queue.
   6398 			 */
   6399 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6400 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6401 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6402 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6403 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6404 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6405 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6406 			CSR_WRITE_FLUSH(sc);
   6407 
   6408 			txq->txq_fifo_head = 0;
   6409 			txq->txq_fifo_stall = 0;
   6410 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6411 		} else {
   6412 			/*
   6413 			 * Still waiting for packets to drain; try again in
   6414 			 * another tick.
   6415 			 */
   6416 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6417 		}
   6418 	}
   6419 
   6420 out:
   6421 	mutex_exit(txq->txq_lock);
   6422 }
   6423 
   6424 /*
   6425  * wm_82547_txfifo_bugchk:
   6426  *
   6427  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6428  *	prevent enqueueing a packet that would wrap around the end
    6429 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6430  *
   6431  *	We do this by checking the amount of space before the end
   6432  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6433  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6434  *	the internal FIFO pointers to the beginning, and restart
   6435  *	transmission on the interface.
   6436  */
   6437 #define	WM_FIFO_HDR		0x10
   6438 #define	WM_82547_PAD_LEN	0x3e0
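/*
 * A worked example of the check below, with assumed values: if
 * txq_fifo_size = 0x4000 and txq_fifo_head = 0x3f00, then space = 0x100.
 * A 1500-byte frame rounds up to len = roundup(1500 + 0x10, 0x10) = 0x5f0,
 * and 0x5f0 >= WM_82547_PAD_LEN + 0x100 (= 0x4e0), so the queue stalls
 * until the FIFO drains.  A 64-byte frame (len = 0x50) passes the check
 * and just advances txq_fifo_head, which wraps modulo txq_fifo_size.
 */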
   6439 static int
   6440 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6441 {
   6442 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6443 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6444 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6445 
   6446 	/* Just return if already stalled. */
   6447 	if (txq->txq_fifo_stall)
   6448 		return 1;
   6449 
   6450 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6451 		/* Stall only occurs in half-duplex mode. */
   6452 		goto send_packet;
   6453 	}
   6454 
   6455 	if (len >= WM_82547_PAD_LEN + space) {
   6456 		txq->txq_fifo_stall = 1;
   6457 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6458 		return 1;
   6459 	}
   6460 
   6461  send_packet:
   6462 	txq->txq_fifo_head += len;
   6463 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6464 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6465 
   6466 	return 0;
   6467 }
   6468 
   6469 static int
   6470 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6471 {
   6472 	int error;
   6473 
   6474 	/*
   6475 	 * Allocate the control data structures, and create and load the
   6476 	 * DMA map for it.
   6477 	 *
   6478 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6479 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6480 	 * both sets within the same 4G segment.
   6481 	 */
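	/*
	 * The 4G constraint is enforced below via the boundary argument
	 * of bus_dmamem_alloc(): with (bus_size_t)0x100000000ULL, the
	 * allocation never crosses a 4GB boundary, so the high 32 bits
	 * of every descriptor address in the ring are identical.
	 */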
   6482 	if (sc->sc_type < WM_T_82544)
   6483 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6484 	else
   6485 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6486 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6487 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6488 	else
   6489 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6490 
   6491 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6492 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6493 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6494 		aprint_error_dev(sc->sc_dev,
   6495 		    "unable to allocate TX control data, error = %d\n",
   6496 		    error);
   6497 		goto fail_0;
   6498 	}
   6499 
   6500 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6501 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6502 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6503 		aprint_error_dev(sc->sc_dev,
   6504 		    "unable to map TX control data, error = %d\n", error);
   6505 		goto fail_1;
   6506 	}
   6507 
   6508 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6509 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6510 		aprint_error_dev(sc->sc_dev,
   6511 		    "unable to create TX control data DMA map, error = %d\n",
   6512 		    error);
   6513 		goto fail_2;
   6514 	}
   6515 
   6516 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6517 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6518 		aprint_error_dev(sc->sc_dev,
   6519 		    "unable to load TX control data DMA map, error = %d\n",
   6520 		    error);
   6521 		goto fail_3;
   6522 	}
   6523 
   6524 	return 0;
   6525 
   6526  fail_3:
   6527 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6528  fail_2:
   6529 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6530 	    WM_TXDESCS_SIZE(txq));
   6531  fail_1:
   6532 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6533  fail_0:
   6534 	return error;
   6535 }
   6536 
   6537 static void
   6538 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6539 {
   6540 
   6541 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6542 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6543 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6544 	    WM_TXDESCS_SIZE(txq));
   6545 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6546 }
   6547 
   6548 static int
   6549 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6550 {
   6551 	int error;
   6552 	size_t rxq_descs_size;
   6553 
   6554 	/*
   6555 	 * Allocate the control data structures, and create and load the
   6556 	 * DMA map for it.
   6557 	 *
   6558 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6559 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6560 	 * both sets within the same 4G segment.
   6561 	 */
   6562 	rxq->rxq_ndesc = WM_NRXDESC;
   6563 	if (sc->sc_type == WM_T_82574)
   6564 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6565 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6566 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6567 	else
   6568 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6569 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6570 
   6571 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6572 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6573 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6574 		aprint_error_dev(sc->sc_dev,
   6575 		    "unable to allocate RX control data, error = %d\n",
   6576 		    error);
   6577 		goto fail_0;
   6578 	}
   6579 
   6580 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6581 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6582 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6583 		aprint_error_dev(sc->sc_dev,
   6584 		    "unable to map RX control data, error = %d\n", error);
   6585 		goto fail_1;
   6586 	}
   6587 
   6588 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6589 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6590 		aprint_error_dev(sc->sc_dev,
   6591 		    "unable to create RX control data DMA map, error = %d\n",
   6592 		    error);
   6593 		goto fail_2;
   6594 	}
   6595 
   6596 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6597 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6598 		aprint_error_dev(sc->sc_dev,
   6599 		    "unable to load RX control data DMA map, error = %d\n",
   6600 		    error);
   6601 		goto fail_3;
   6602 	}
   6603 
   6604 	return 0;
   6605 
   6606  fail_3:
   6607 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6608  fail_2:
   6609 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6610 	    rxq_descs_size);
   6611  fail_1:
   6612 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6613  fail_0:
   6614 	return error;
   6615 }
   6616 
   6617 static void
   6618 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6619 {
   6620 
   6621 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6622 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6623 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6624 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6625 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6626 }
   6627 
   6629 static int
   6630 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6631 {
   6632 	int i, error;
   6633 
   6634 	/* Create the transmit buffer DMA maps. */
   6635 	WM_TXQUEUELEN(txq) =
   6636 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6637 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6638 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6639 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6640 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6641 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6642 			aprint_error_dev(sc->sc_dev,
   6643 			    "unable to create Tx DMA map %d, error = %d\n",
   6644 			    i, error);
   6645 			goto fail;
   6646 		}
   6647 	}
   6648 
   6649 	return 0;
   6650 
   6651  fail:
   6652 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6653 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6654 			bus_dmamap_destroy(sc->sc_dmat,
   6655 			    txq->txq_soft[i].txs_dmamap);
   6656 	}
   6657 	return error;
   6658 }
   6659 
   6660 static void
   6661 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6662 {
   6663 	int i;
   6664 
   6665 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6666 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6667 			bus_dmamap_destroy(sc->sc_dmat,
   6668 			    txq->txq_soft[i].txs_dmamap);
   6669 	}
   6670 }
   6671 
   6672 static int
   6673 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6674 {
   6675 	int i, error;
   6676 
   6677 	/* Create the receive buffer DMA maps. */
   6678 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6679 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6680 			    MCLBYTES, 0, 0,
   6681 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6682 			aprint_error_dev(sc->sc_dev,
   6683 			    "unable to create Rx DMA map %d error = %d\n",
   6684 			    i, error);
   6685 			goto fail;
   6686 		}
   6687 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6688 	}
   6689 
   6690 	return 0;
   6691 
   6692  fail:
   6693 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6694 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6695 			bus_dmamap_destroy(sc->sc_dmat,
   6696 			    rxq->rxq_soft[i].rxs_dmamap);
   6697 	}
   6698 	return error;
   6699 }
   6700 
   6701 static void
   6702 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6703 {
   6704 	int i;
   6705 
   6706 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6707 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6708 			bus_dmamap_destroy(sc->sc_dmat,
   6709 			    rxq->rxq_soft[i].rxs_dmamap);
   6710 	}
   6711 }
   6712 
   6713 /*
    6714  * wm_alloc_txrx_queues:
    6715  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6716  */
   6717 static int
   6718 wm_alloc_txrx_queues(struct wm_softc *sc)
   6719 {
   6720 	int i, error, tx_done, rx_done;
   6721 
   6722 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6723 	    KM_SLEEP);
   6724 	if (sc->sc_queue == NULL) {
    6725 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6726 		error = ENOMEM;
   6727 		goto fail_0;
   6728 	}
   6729 
   6730 	/* For transmission */
   6731 	error = 0;
   6732 	tx_done = 0;
   6733 	for (i = 0; i < sc->sc_nqueues; i++) {
   6734 #ifdef WM_EVENT_COUNTERS
   6735 		int j;
   6736 		const char *xname;
   6737 #endif
   6738 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6739 		txq->txq_sc = sc;
   6740 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6741 
   6742 		error = wm_alloc_tx_descs(sc, txq);
   6743 		if (error)
   6744 			break;
   6745 		error = wm_alloc_tx_buffer(sc, txq);
   6746 		if (error) {
   6747 			wm_free_tx_descs(sc, txq);
   6748 			break;
   6749 		}
   6750 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6751 		if (txq->txq_interq == NULL) {
   6752 			wm_free_tx_descs(sc, txq);
   6753 			wm_free_tx_buffer(sc, txq);
   6754 			error = ENOMEM;
   6755 			break;
   6756 		}
   6757 
   6758 #ifdef WM_EVENT_COUNTERS
   6759 		xname = device_xname(sc->sc_dev);
   6760 
   6761 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6762 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6763 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6764 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6765 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6766 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6767 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6768 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6769 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6770 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6771 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6772 
   6773 		for (j = 0; j < WM_NTXSEGS; j++) {
   6774 			snprintf(txq->txq_txseg_evcnt_names[j],
   6775 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6776 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6777 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6778 		}
   6779 
   6780 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6781 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6782 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6783 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6784 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6785 #endif /* WM_EVENT_COUNTERS */
   6786 
   6787 		tx_done++;
   6788 	}
   6789 	if (error)
   6790 		goto fail_1;
   6791 
   6792 	/* For receive */
   6793 	error = 0;
   6794 	rx_done = 0;
   6795 	for (i = 0; i < sc->sc_nqueues; i++) {
   6796 #ifdef WM_EVENT_COUNTERS
   6797 		const char *xname;
   6798 #endif
   6799 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6800 		rxq->rxq_sc = sc;
   6801 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6802 
   6803 		error = wm_alloc_rx_descs(sc, rxq);
   6804 		if (error)
   6805 			break;
   6806 
   6807 		error = wm_alloc_rx_buffer(sc, rxq);
   6808 		if (error) {
   6809 			wm_free_rx_descs(sc, rxq);
   6810 			break;
   6811 		}
   6812 
   6813 #ifdef WM_EVENT_COUNTERS
   6814 		xname = device_xname(sc->sc_dev);
   6815 
   6816 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6817 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6818 
   6819 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6820 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6821 #endif /* WM_EVENT_COUNTERS */
   6822 
   6823 		rx_done++;
   6824 	}
   6825 	if (error)
   6826 		goto fail_2;
   6827 
   6828 	for (i = 0; i < sc->sc_nqueues; i++) {
   6829 		char rndname[16];
   6830 
   6831 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
   6832 		    device_xname(sc->sc_dev), i);
   6833 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
   6834 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
   6835 	}
   6836 
   6837 	return 0;
   6838 
   6839  fail_2:
   6840 	for (i = 0; i < rx_done; i++) {
   6841 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6842 		wm_free_rx_buffer(sc, rxq);
   6843 		wm_free_rx_descs(sc, rxq);
   6844 		if (rxq->rxq_lock)
   6845 			mutex_obj_free(rxq->rxq_lock);
   6846 	}
   6847  fail_1:
   6848 	for (i = 0; i < tx_done; i++) {
   6849 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6850 		pcq_destroy(txq->txq_interq);
   6851 		wm_free_tx_buffer(sc, txq);
   6852 		wm_free_tx_descs(sc, txq);
   6853 		if (txq->txq_lock)
   6854 			mutex_obj_free(txq->txq_lock);
   6855 	}
   6856 
   6857 	kmem_free(sc->sc_queue,
   6858 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6859  fail_0:
   6860 	return error;
   6861 }
   6862 
   6863 /*
    6864  * wm_free_txrx_queues:
    6865  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6866  */
   6867 static void
   6868 wm_free_txrx_queues(struct wm_softc *sc)
   6869 {
   6870 	int i;
   6871 
   6872 	for (i = 0; i < sc->sc_nqueues; i++)
   6873 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
   6874 
   6875 	for (i = 0; i < sc->sc_nqueues; i++) {
   6876 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6877 
   6878 #ifdef WM_EVENT_COUNTERS
   6879 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6880 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6881 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6882 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6883 #endif /* WM_EVENT_COUNTERS */
   6884 
   6885 		wm_free_rx_buffer(sc, rxq);
   6886 		wm_free_rx_descs(sc, rxq);
   6887 		if (rxq->rxq_lock)
   6888 			mutex_obj_free(rxq->rxq_lock);
   6889 	}
   6890 
   6891 	for (i = 0; i < sc->sc_nqueues; i++) {
   6892 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6893 		struct mbuf *m;
   6894 #ifdef WM_EVENT_COUNTERS
   6895 		int j;
   6896 
   6897 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6898 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6899 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6900 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6901 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6902 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6903 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6904 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6905 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6906 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6907 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6908 
   6909 		for (j = 0; j < WM_NTXSEGS; j++)
   6910 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6911 
   6912 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6913 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6914 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6915 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6916 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6917 #endif /* WM_EVENT_COUNTERS */
   6918 
   6919 		/* Drain txq_interq */
   6920 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6921 			m_freem(m);
   6922 		pcq_destroy(txq->txq_interq);
   6923 
   6924 		wm_free_tx_buffer(sc, txq);
   6925 		wm_free_tx_descs(sc, txq);
   6926 		if (txq->txq_lock)
   6927 			mutex_obj_free(txq->txq_lock);
   6928 	}
   6929 
   6930 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6931 }
   6932 
   6933 static void
   6934 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6935 {
   6936 
   6937 	KASSERT(mutex_owned(txq->txq_lock));
   6938 
   6939 	/* Initialize the transmit descriptor ring. */
   6940 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6941 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6942 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6943 	txq->txq_free = WM_NTXDESC(txq);
   6944 	txq->txq_next = 0;
   6945 }
   6946 
   6947 static void
   6948 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6949     struct wm_txqueue *txq)
   6950 {
   6951 
   6952 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6953 		device_xname(sc->sc_dev), __func__));
   6954 	KASSERT(mutex_owned(txq->txq_lock));
   6955 
   6956 	if (sc->sc_type < WM_T_82543) {
   6957 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6958 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6959 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6960 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6961 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6962 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6963 	} else {
   6964 		int qid = wmq->wmq_id;
   6965 
   6966 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6967 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6968 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6969 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6970 
   6971 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6972 			/*
   6973 			 * Don't write TDT before TCTL.EN is set.
    6974 			 * See the documentation.
   6975 			 */
   6976 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6977 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6978 			    | TXDCTL_WTHRESH(0));
   6979 		else {
   6980 			/* XXX should update with AIM? */
   6981 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6982 			if (sc->sc_type >= WM_T_82540) {
   6983 				/* Should be the same */
   6984 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6985 			}
   6986 
   6987 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6988 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6989 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6990 		}
   6991 	}
   6992 }
   6993 
   6994 static void
   6995 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6996 {
   6997 	int i;
   6998 
   6999 	KASSERT(mutex_owned(txq->txq_lock));
   7000 
   7001 	/* Initialize the transmit job descriptors. */
   7002 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7003 		txq->txq_soft[i].txs_mbuf = NULL;
   7004 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7005 	txq->txq_snext = 0;
   7006 	txq->txq_sdirty = 0;
   7007 }
   7008 
   7009 static void
   7010 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7011     struct wm_txqueue *txq)
   7012 {
   7013 
   7014 	KASSERT(mutex_owned(txq->txq_lock));
   7015 
   7016 	/*
   7017 	 * Set up some register offsets that are different between
   7018 	 * the i82542 and the i82543 and later chips.
   7019 	 */
   7020 	if (sc->sc_type < WM_T_82543)
   7021 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7022 	else
   7023 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7024 
   7025 	wm_init_tx_descs(sc, txq);
   7026 	wm_init_tx_regs(sc, wmq, txq);
   7027 	wm_init_tx_buffer(sc, txq);
   7028 
   7029 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7030 	txq->txq_sending = false;
   7031 }
   7032 
   7033 static void
   7034 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7035     struct wm_rxqueue *rxq)
   7036 {
   7037 
   7038 	KASSERT(mutex_owned(rxq->rxq_lock));
   7039 
   7040 	/*
   7041 	 * Initialize the receive descriptor and receive job
   7042 	 * descriptor rings.
   7043 	 */
   7044 	if (sc->sc_type < WM_T_82543) {
   7045 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7046 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7047 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7048 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7049 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7050 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7051 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7052 
   7053 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7054 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7055 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7056 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7057 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7058 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7059 	} else {
   7060 		int qid = wmq->wmq_id;
   7061 
   7062 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7063 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7064 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7065 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7066 
   7067 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
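			/*
			 * SRRCTL's BSIZEPKT field counts the buffer size
			 * in units of 1 << SRRCTL_BSIZEPKT_SHIFT bytes
			 * (1KB units, assuming the usual shift of 10),
			 * hence the check that MCLBYTES is a whole
			 * multiple of that unit.
			 */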
   7068 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7069 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7070 
    7071 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7072 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7073 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7074 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7075 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7076 			    | RXDCTL_WTHRESH(1));
   7077 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7078 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7079 		} else {
   7080 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7081 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7082 			/* XXX should update with AIM? */
   7083 			CSR_WRITE(sc, WMREG_RDTR,
   7084 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7085 			/* MUST be the same */
   7086 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7087 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7088 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7089 		}
   7090 	}
   7091 }
   7092 
   7093 static int
   7094 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7095 {
   7096 	struct wm_rxsoft *rxs;
   7097 	int error, i;
   7098 
   7099 	KASSERT(mutex_owned(rxq->rxq_lock));
   7100 
   7101 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7102 		rxs = &rxq->rxq_soft[i];
   7103 		if (rxs->rxs_mbuf == NULL) {
   7104 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7105 				log(LOG_ERR, "%s: unable to allocate or map "
   7106 				    "rx buffer %d, error = %d\n",
   7107 				    device_xname(sc->sc_dev), i, error);
   7108 				/*
   7109 				 * XXX Should attempt to run with fewer receive
   7110 				 * XXX buffers instead of just failing.
   7111 				 */
   7112 				wm_rxdrain(rxq);
   7113 				return ENOMEM;
   7114 			}
   7115 		} else {
   7116 			/*
   7117 			 * For 82575 and 82576, the RX descriptors must be
   7118 			 * initialized after the setting of RCTL.EN in
   7119 			 * wm_set_filter()
   7120 			 */
   7121 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7122 				wm_init_rxdesc(rxq, i);
   7123 		}
   7124 	}
   7125 	rxq->rxq_ptr = 0;
   7126 	rxq->rxq_discard = 0;
   7127 	WM_RXCHAIN_RESET(rxq);
   7128 
   7129 	return 0;
   7130 }
   7131 
   7132 static int
   7133 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7134     struct wm_rxqueue *rxq)
   7135 {
   7136 
   7137 	KASSERT(mutex_owned(rxq->rxq_lock));
   7138 
   7139 	/*
   7140 	 * Set up some register offsets that are different between
   7141 	 * the i82542 and the i82543 and later chips.
   7142 	 */
   7143 	if (sc->sc_type < WM_T_82543)
   7144 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7145 	else
   7146 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7147 
   7148 	wm_init_rx_regs(sc, wmq, rxq);
   7149 	return wm_init_rx_buffer(sc, rxq);
   7150 }
   7151 
   7152 /*
    7153  * wm_init_txrx_queues:
    7154  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   7155  */
   7156 static int
   7157 wm_init_txrx_queues(struct wm_softc *sc)
   7158 {
   7159 	int i, error = 0;
   7160 
   7161 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7162 		device_xname(sc->sc_dev), __func__));
   7163 
   7164 	for (i = 0; i < sc->sc_nqueues; i++) {
   7165 		struct wm_queue *wmq = &sc->sc_queue[i];
   7166 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7167 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7168 
   7169 		/*
   7170 		 * TODO
    7171 		 * Currently, a constant value is used instead of AIM.
    7172 		 * Furthermore, the interrupt interval of a multiqueue
    7173 		 * configuration, which uses polling mode, is lower than
    7174 		 * the default value. More tuning and AIM are required.
   7175 		 */
   7176 		if (wm_is_using_multiqueue(sc))
   7177 			wmq->wmq_itr = 50;
   7178 		else
   7179 			wmq->wmq_itr = sc->sc_itr_init;
   7180 		wmq->wmq_set_itr = true;
   7181 
   7182 		mutex_enter(txq->txq_lock);
   7183 		wm_init_tx_queue(sc, wmq, txq);
   7184 		mutex_exit(txq->txq_lock);
   7185 
   7186 		mutex_enter(rxq->rxq_lock);
   7187 		error = wm_init_rx_queue(sc, wmq, rxq);
   7188 		mutex_exit(rxq->rxq_lock);
   7189 		if (error)
   7190 			break;
   7191 	}
   7192 
   7193 	return error;
   7194 }
   7195 
   7196 /*
   7197  * wm_tx_offload:
   7198  *
   7199  *	Set up TCP/IP checksumming parameters for the
   7200  *	specified packet.
   7201  */
   7202 static int
   7203 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7204     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7205 {
   7206 	struct mbuf *m0 = txs->txs_mbuf;
   7207 	struct livengood_tcpip_ctxdesc *t;
   7208 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7209 	uint32_t ipcse;
   7210 	struct ether_header *eh;
   7211 	int offset, iphl;
   7212 	uint8_t fields;
   7213 
   7214 	/*
   7215 	 * XXX It would be nice if the mbuf pkthdr had offset
   7216 	 * fields for the protocol headers.
   7217 	 */
   7218 
   7219 	eh = mtod(m0, struct ether_header *);
   7220 	switch (htons(eh->ether_type)) {
   7221 	case ETHERTYPE_IP:
   7222 	case ETHERTYPE_IPV6:
   7223 		offset = ETHER_HDR_LEN;
   7224 		break;
   7225 
   7226 	case ETHERTYPE_VLAN:
   7227 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7228 		break;
   7229 
   7230 	default:
   7231 		/* Don't support this protocol or encapsulation. */
   7232 		*fieldsp = 0;
   7233 		*cmdp = 0;
   7234 		return 0;
   7235 	}
   7236 
   7237 	if ((m0->m_pkthdr.csum_flags &
   7238 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7239 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7240 	} else
   7241 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7242 
   7243 	ipcse = offset + iphl - 1;
   7244 
   7245 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7246 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7247 	seg = 0;
   7248 	fields = 0;
   7249 
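	/*
	 * For TSO, the controller rewrites the IP and TCP headers of each
	 * generated segment, so th_sum below is seeded with a pseudo-header
	 * checksum that deliberately excludes the length (ip_len/ip6_plen
	 * are zeroed first); the hardware folds in the per-segment length.
	 */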
   7250 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7251 		int hlen = offset + iphl;
   7252 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7253 
   7254 		if (__predict_false(m0->m_len <
   7255 				    (hlen + sizeof(struct tcphdr)))) {
   7256 			/*
   7257 			 * TCP/IP headers are not in the first mbuf; we need
   7258 			 * to do this the slow and painful way. Let's just
   7259 			 * hope this doesn't happen very often.
   7260 			 */
   7261 			struct tcphdr th;
   7262 
   7263 			WM_Q_EVCNT_INCR(txq, tsopain);
   7264 
   7265 			m_copydata(m0, hlen, sizeof(th), &th);
   7266 			if (v4) {
   7267 				struct ip ip;
   7268 
   7269 				m_copydata(m0, offset, sizeof(ip), &ip);
   7270 				ip.ip_len = 0;
   7271 				m_copyback(m0,
   7272 				    offset + offsetof(struct ip, ip_len),
   7273 				    sizeof(ip.ip_len), &ip.ip_len);
   7274 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7275 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7276 			} else {
   7277 				struct ip6_hdr ip6;
   7278 
   7279 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7280 				ip6.ip6_plen = 0;
   7281 				m_copyback(m0,
   7282 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7283 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7284 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7285 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7286 			}
   7287 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7288 			    sizeof(th.th_sum), &th.th_sum);
   7289 
   7290 			hlen += th.th_off << 2;
   7291 		} else {
   7292 			/*
   7293 			 * TCP/IP headers are in the first mbuf; we can do
   7294 			 * this the easy way.
   7295 			 */
   7296 			struct tcphdr *th;
   7297 
   7298 			if (v4) {
   7299 				struct ip *ip =
   7300 				    (void *)(mtod(m0, char *) + offset);
   7301 				th = (void *)(mtod(m0, char *) + hlen);
   7302 
   7303 				ip->ip_len = 0;
   7304 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7305 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7306 			} else {
   7307 				struct ip6_hdr *ip6 =
   7308 				    (void *)(mtod(m0, char *) + offset);
   7309 				th = (void *)(mtod(m0, char *) + hlen);
   7310 
   7311 				ip6->ip6_plen = 0;
   7312 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7313 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7314 			}
   7315 			hlen += th->th_off << 2;
   7316 		}
   7317 
   7318 		if (v4) {
   7319 			WM_Q_EVCNT_INCR(txq, tso);
   7320 			cmdlen |= WTX_TCPIP_CMD_IP;
   7321 		} else {
   7322 			WM_Q_EVCNT_INCR(txq, tso6);
   7323 			ipcse = 0;
   7324 		}
   7325 		cmd |= WTX_TCPIP_CMD_TSE;
   7326 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7327 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7328 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7329 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7330 	}
   7331 
   7332 	/*
   7333 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7334 	 * offload feature, if we load the context descriptor, we
   7335 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7336 	 */
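	/*
	 * E.g. for an untagged IPv4/TCP frame with a 20-byte IP header:
	 * IPCSS = 14, IPCSO = 14 + 10 = 24, IPCSE = 33, TUCSS = 34 and
	 * TUCSO = 34 + 16 = 50.
	 */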
   7337 
   7338 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7339 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7340 	    WTX_TCPIP_IPCSE(ipcse);
   7341 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7342 		WM_Q_EVCNT_INCR(txq, ipsum);
   7343 		fields |= WTX_IXSM;
   7344 	}
   7345 
   7346 	offset += iphl;
   7347 
   7348 	if (m0->m_pkthdr.csum_flags &
   7349 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7350 		WM_Q_EVCNT_INCR(txq, tusum);
   7351 		fields |= WTX_TXSM;
   7352 		tucs = WTX_TCPIP_TUCSS(offset) |
   7353 		    WTX_TCPIP_TUCSO(offset +
   7354 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7355 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7356 	} else if ((m0->m_pkthdr.csum_flags &
   7357 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7358 		WM_Q_EVCNT_INCR(txq, tusum6);
   7359 		fields |= WTX_TXSM;
   7360 		tucs = WTX_TCPIP_TUCSS(offset) |
   7361 		    WTX_TCPIP_TUCSO(offset +
   7362 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7363 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7364 	} else {
   7365 		/* Just initialize it to a valid TCP context. */
   7366 		tucs = WTX_TCPIP_TUCSS(offset) |
   7367 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7368 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7369 	}
   7370 
   7371 	/*
    7372 	 * Except on the 82574, we don't have to write a context
    7373 	 * descriptor for every packet. On the 82574, we must write a
    7374 	 * context descriptor for every packet when we use two descriptor
    7375 	 * queues. Writing one for every packet adds overhead, but it
    7376 	 * does not cause problems.
   7377 	 */
   7378 	/* Fill in the context descriptor. */
   7379 	t = (struct livengood_tcpip_ctxdesc *)
   7380 	    &txq->txq_descs[txq->txq_next];
   7381 	t->tcpip_ipcs = htole32(ipcs);
   7382 	t->tcpip_tucs = htole32(tucs);
   7383 	t->tcpip_cmdlen = htole32(cmdlen);
   7384 	t->tcpip_seg = htole32(seg);
   7385 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7386 
   7387 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7388 	txs->txs_ndesc++;
   7389 
   7390 	*cmdp = cmd;
   7391 	*fieldsp = fields;
   7392 
   7393 	return 0;
   7394 }
   7395 
   7396 static inline int
   7397 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7398 {
   7399 	struct wm_softc *sc = ifp->if_softc;
   7400 	u_int cpuid = cpu_index(curcpu());
   7401 
   7402 	/*
    7403 	 * Currently, a simple CPU-based distribution strategy.
    7404 	 * TODO:
    7405 	 * Distribute by flowid (RSS hash value).
   7406 	 */
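	/*
	 * For example (values assumed): with ncpu = 8, sc_nqueues = 4 and
	 * sc_affinity_offset = 2, a packet sent from CPU 1 maps to
	 * ((1 + 8 - 2) % 8) % 4 = 7 % 4 = 3, i.e. queue 3.
	 */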
   7407 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7408 }
   7409 
   7410 /*
   7411  * wm_start:		[ifnet interface function]
   7412  *
   7413  *	Start packet transmission on the interface.
   7414  */
   7415 static void
   7416 wm_start(struct ifnet *ifp)
   7417 {
   7418 	struct wm_softc *sc = ifp->if_softc;
   7419 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7420 
   7421 #ifdef WM_MPSAFE
   7422 	KASSERT(if_is_mpsafe(ifp));
   7423 #endif
   7424 	/*
   7425 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7426 	 */
   7427 
   7428 	mutex_enter(txq->txq_lock);
   7429 	if (!txq->txq_stopping)
   7430 		wm_start_locked(ifp);
   7431 	mutex_exit(txq->txq_lock);
   7432 }
   7433 
   7434 static void
   7435 wm_start_locked(struct ifnet *ifp)
   7436 {
   7437 	struct wm_softc *sc = ifp->if_softc;
   7438 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7439 
   7440 	wm_send_common_locked(ifp, txq, false);
   7441 }
   7442 
   7443 static int
   7444 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7445 {
   7446 	int qid;
   7447 	struct wm_softc *sc = ifp->if_softc;
   7448 	struct wm_txqueue *txq;
   7449 
   7450 	qid = wm_select_txqueue(ifp, m);
   7451 	txq = &sc->sc_queue[qid].wmq_txq;
   7452 
   7453 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7454 		m_freem(m);
   7455 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7456 		return ENOBUFS;
   7457 	}
   7458 
   7459 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   7460 	ifp->if_obytes += m->m_pkthdr.len;
   7461 	if (m->m_flags & M_MCAST)
   7462 		ifp->if_omcasts++;
   7463 
   7464 	if (mutex_tryenter(txq->txq_lock)) {
   7465 		if (!txq->txq_stopping)
   7466 			wm_transmit_locked(ifp, txq);
   7467 		mutex_exit(txq->txq_lock);
   7468 	}
   7469 
   7470 	return 0;
   7471 }
   7472 
   7473 static void
   7474 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7475 {
   7476 
   7477 	wm_send_common_locked(ifp, txq, true);
   7478 }
   7479 
   7480 static void
   7481 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7482     bool is_transmit)
   7483 {
   7484 	struct wm_softc *sc = ifp->if_softc;
   7485 	struct mbuf *m0;
   7486 	struct wm_txsoft *txs;
   7487 	bus_dmamap_t dmamap;
   7488 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7489 	bus_addr_t curaddr;
   7490 	bus_size_t seglen, curlen;
   7491 	uint32_t cksumcmd;
   7492 	uint8_t cksumfields;
   7493 	bool remap = true;
   7494 
   7495 	KASSERT(mutex_owned(txq->txq_lock));
   7496 
   7497 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7498 		return;
   7499 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7500 		return;
   7501 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7502 		return;
   7503 
   7504 	/* Remember the previous number of free descriptors. */
   7505 	ofree = txq->txq_free;
   7506 
   7507 	/*
   7508 	 * Loop through the send queue, setting up transmit descriptors
   7509 	 * until we drain the queue, or use up all available transmit
   7510 	 * descriptors.
   7511 	 */
   7512 	for (;;) {
   7513 		m0 = NULL;
   7514 
   7515 		/* Get a work queue entry. */
   7516 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7517 			wm_txeof(txq, UINT_MAX);
   7518 			if (txq->txq_sfree == 0) {
   7519 				DPRINTF(WM_DEBUG_TX,
   7520 				    ("%s: TX: no free job descriptors\n",
   7521 					device_xname(sc->sc_dev)));
   7522 				WM_Q_EVCNT_INCR(txq, txsstall);
   7523 				break;
   7524 			}
   7525 		}
   7526 
   7527 		/* Grab a packet off the queue. */
   7528 		if (is_transmit)
   7529 			m0 = pcq_get(txq->txq_interq);
   7530 		else
   7531 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7532 		if (m0 == NULL)
   7533 			break;
   7534 
   7535 		DPRINTF(WM_DEBUG_TX,
   7536 		    ("%s: TX: have packet to transmit: %p\n",
   7537 			device_xname(sc->sc_dev), m0));
   7538 
   7539 		txs = &txq->txq_soft[txq->txq_snext];
   7540 		dmamap = txs->txs_dmamap;
   7541 
   7542 		use_tso = (m0->m_pkthdr.csum_flags &
   7543 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7544 
   7545 		/*
   7546 		 * So says the Linux driver:
   7547 		 * The controller does a simple calculation to make sure
   7548 		 * there is enough room in the FIFO before initiating the
   7549 		 * DMA for each buffer. The calc is:
   7550 		 *	4 = ceil(buffer len / MSS)
   7551 		 * To make sure we don't overrun the FIFO, adjust the max
   7552 		 * buffer len if the MSS drops.
   7553 		 */
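		/*
		 * For example (MSS assumed): with segsz = 1448, each DMA
		 * segment is capped at 1448 << 2 = 5792 bytes, i.e. at
		 * most four MSS-sized chunks per buffer (assuming this is
		 * below WTX_MAX_LEN).
		 */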
   7554 		dmamap->dm_maxsegsz =
   7555 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7556 		    ? m0->m_pkthdr.segsz << 2
   7557 		    : WTX_MAX_LEN;
   7558 
   7559 		/*
   7560 		 * Load the DMA map.  If this fails, the packet either
   7561 		 * didn't fit in the allotted number of segments, or we
   7562 		 * were short on resources.  For the too-many-segments
   7563 		 * case, we simply report an error and drop the packet,
   7564 		 * since we can't sanely copy a jumbo packet to a single
   7565 		 * buffer.
   7566 		 */
   7567 retry:
   7568 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7569 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7570 		if (__predict_false(error)) {
   7571 			if (error == EFBIG) {
   7572 				if (remap == true) {
   7573 					struct mbuf *m;
   7574 
   7575 					remap = false;
   7576 					m = m_defrag(m0, M_NOWAIT);
   7577 					if (m != NULL) {
   7578 						WM_Q_EVCNT_INCR(txq, defrag);
   7579 						m0 = m;
   7580 						goto retry;
   7581 					}
   7582 				}
   7583 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7584 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7585 				    "DMA segments, dropping...\n",
   7586 				    device_xname(sc->sc_dev));
   7587 				wm_dump_mbuf_chain(sc, m0);
   7588 				m_freem(m0);
   7589 				continue;
   7590 			}
   7591 			/* Short on resources, just stop for now. */
   7592 			DPRINTF(WM_DEBUG_TX,
   7593 			    ("%s: TX: dmamap load failed: %d\n",
   7594 				device_xname(sc->sc_dev), error));
   7595 			break;
   7596 		}
   7597 
   7598 		segs_needed = dmamap->dm_nsegs;
   7599 		if (use_tso) {
   7600 			/* For sentinel descriptor; see below. */
   7601 			segs_needed++;
   7602 		}
   7603 
   7604 		/*
   7605 		 * Ensure we have enough descriptors free to describe
   7606 		 * the packet. Note, we always reserve one descriptor
   7607 		 * at the end of the ring due to the semantics of the
   7608 		 * TDT register, plus one more in the event we need
   7609 		 * to load offload context.
   7610 		 */
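		/*
		 * Example (numbers assumed): a packet mapped to 3 DMA
		 * segments plus a TSO sentinel gives segs_needed = 4;
		 * with txq_free = 5, the test 4 > 5 - 2 below stalls the
		 * queue until descriptors are reclaimed.
		 */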
   7611 		if (segs_needed > txq->txq_free - 2) {
   7612 			/*
   7613 			 * Not enough free descriptors to transmit this
   7614 			 * packet.  We haven't committed anything yet,
   7615 			 * so just unload the DMA map, put the packet
    7616 			 * back on the queue, and punt. Notify the upper
   7617 			 * layer that there are no more slots left.
   7618 			 */
   7619 			DPRINTF(WM_DEBUG_TX,
   7620 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7621 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7622 				segs_needed, txq->txq_free - 1));
   7623 			if (!is_transmit)
   7624 				ifp->if_flags |= IFF_OACTIVE;
   7625 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7626 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7627 			WM_Q_EVCNT_INCR(txq, txdstall);
   7628 			break;
   7629 		}
   7630 
   7631 		/*
   7632 		 * Check for 82547 Tx FIFO bug. We need to do this
   7633 		 * once we know we can transmit the packet, since we
   7634 		 * do some internal FIFO space accounting here.
   7635 		 */
   7636 		if (sc->sc_type == WM_T_82547 &&
   7637 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7638 			DPRINTF(WM_DEBUG_TX,
   7639 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7640 				device_xname(sc->sc_dev)));
   7641 			if (!is_transmit)
   7642 				ifp->if_flags |= IFF_OACTIVE;
   7643 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7644 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7645 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7646 			break;
   7647 		}
   7648 
   7649 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7650 
   7651 		DPRINTF(WM_DEBUG_TX,
   7652 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7653 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7654 
   7655 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7656 
   7657 		/*
   7658 		 * Store a pointer to the packet so that we can free it
   7659 		 * later.
   7660 		 *
    7661 		 * Initially, we consider the number of descriptors the
    7662 		 * packet uses to be the number of DMA segments.  This may
    7663 		 * be incremented by 1 if we do checksum offload (a
    7664 		 * descriptor is used to set the checksum context).
   7665 		 */
   7666 		txs->txs_mbuf = m0;
   7667 		txs->txs_firstdesc = txq->txq_next;
   7668 		txs->txs_ndesc = segs_needed;
   7669 
   7670 		/* Set up offload parameters for this packet. */
   7671 		if (m0->m_pkthdr.csum_flags &
   7672 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7673 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7674 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7675 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7676 					  &cksumfields) != 0) {
   7677 				/* Error message already displayed. */
   7678 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7679 				continue;
   7680 			}
   7681 		} else {
   7682 			cksumcmd = 0;
   7683 			cksumfields = 0;
   7684 		}
   7685 
   7686 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7687 
   7688 		/* Sync the DMA map. */
   7689 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7690 		    BUS_DMASYNC_PREWRITE);
   7691 
   7692 		/* Initialize the transmit descriptor. */
   7693 		for (nexttx = txq->txq_next, seg = 0;
   7694 		     seg < dmamap->dm_nsegs; seg++) {
   7695 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7696 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7697 			     seglen != 0;
   7698 			     curaddr += curlen, seglen -= curlen,
   7699 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7700 				curlen = seglen;
   7701 
   7702 				/*
   7703 				 * So says the Linux driver:
   7704 				 * Work around for premature descriptor
   7705 				 * write-backs in TSO mode.  Append a
   7706 				 * 4-byte sentinel descriptor.
   7707 				 */
   7708 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7709 				    curlen > 8)
   7710 					curlen -= 4;
   7711 
   7712 				wm_set_dma_addr(
   7713 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7714 				txq->txq_descs[nexttx].wtx_cmdlen
   7715 				    = htole32(cksumcmd | curlen);
   7716 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7717 				    = 0;
   7718 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7719 				    = cksumfields;
    7720 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7721 				lasttx = nexttx;
   7722 
   7723 				DPRINTF(WM_DEBUG_TX,
   7724 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7725 					"len %#04zx\n",
   7726 					device_xname(sc->sc_dev), nexttx,
   7727 					(uint64_t)curaddr, curlen));
   7728 			}
   7729 		}
   7730 
   7731 		KASSERT(lasttx != -1);
   7732 
   7733 		/*
   7734 		 * Set up the command byte on the last descriptor of
   7735 		 * the packet. If we're in the interrupt delay window,
   7736 		 * delay the interrupt.
   7737 		 */
   7738 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7739 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7740 
   7741 		/*
   7742 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7743 		 * up the descriptor to encapsulate the packet for us.
   7744 		 *
   7745 		 * This is only valid on the last descriptor of the packet.
   7746 		 */
   7747 		if (vlan_has_tag(m0)) {
   7748 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7749 			    htole32(WTX_CMD_VLE);
   7750 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7751 			    = htole16(vlan_get_tag(m0));
   7752 		}
   7753 
   7754 		txs->txs_lastdesc = lasttx;
   7755 
   7756 		DPRINTF(WM_DEBUG_TX,
   7757 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7758 			device_xname(sc->sc_dev),
   7759 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7760 
   7761 		/* Sync the descriptors we're using. */
   7762 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7763 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7764 
   7765 		/* Give the packet to the chip. */
   7766 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7767 
   7768 		DPRINTF(WM_DEBUG_TX,
   7769 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7770 
   7771 		DPRINTF(WM_DEBUG_TX,
   7772 		    ("%s: TX: finished transmitting packet, job %d\n",
   7773 			device_xname(sc->sc_dev), txq->txq_snext));
   7774 
   7775 		/* Advance the tx pointer. */
   7776 		txq->txq_free -= txs->txs_ndesc;
   7777 		txq->txq_next = nexttx;
   7778 
   7779 		txq->txq_sfree--;
   7780 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7781 
   7782 		/* Pass the packet to any BPF listeners. */
   7783 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7784 	}
   7785 
   7786 	if (m0 != NULL) {
   7787 		if (!is_transmit)
   7788 			ifp->if_flags |= IFF_OACTIVE;
   7789 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7790 		WM_Q_EVCNT_INCR(txq, descdrop);
   7791 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7792 			__func__));
   7793 		m_freem(m0);
   7794 	}
   7795 
   7796 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7797 		/* No more slots; notify upper layer. */
   7798 		if (!is_transmit)
   7799 			ifp->if_flags |= IFF_OACTIVE;
   7800 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7801 	}
   7802 
   7803 	if (txq->txq_free != ofree) {
   7804 		/* Set a watchdog timer in case the chip flakes out. */
   7805 		txq->txq_lastsent = time_uptime;
   7806 		txq->txq_sending = true;
   7807 	}
   7808 }
   7809 
   7810 /*
   7811  * wm_nq_tx_offload:
   7812  *
   7813  *	Set up TCP/IP checksumming parameters for the
   7814  *	specified packet, for NEWQUEUE devices
   7815  */
   7816 static int
   7817 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7818     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7819 {
   7820 	struct mbuf *m0 = txs->txs_mbuf;
   7821 	uint32_t vl_len, mssidx, cmdc;
   7822 	struct ether_header *eh;
   7823 	int offset, iphl;
   7824 
   7825 	/*
   7826 	 * XXX It would be nice if the mbuf pkthdr had offset
   7827 	 * fields for the protocol headers.
   7828 	 */
   7829 	*cmdlenp = 0;
   7830 	*fieldsp = 0;
   7831 
   7832 	eh = mtod(m0, struct ether_header *);
   7833 	switch (htons(eh->ether_type)) {
   7834 	case ETHERTYPE_IP:
   7835 	case ETHERTYPE_IPV6:
   7836 		offset = ETHER_HDR_LEN;
   7837 		break;
   7838 
   7839 	case ETHERTYPE_VLAN:
   7840 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7841 		break;
   7842 
   7843 	default:
   7844 		/* Don't support this protocol or encapsulation. */
   7845 		*do_csum = false;
   7846 		return 0;
   7847 	}
   7848 	*do_csum = true;
   7849 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7850 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7851 
   7852 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7853 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7854 
   7855 	if ((m0->m_pkthdr.csum_flags &
   7856 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7857 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7858 	} else {
   7859 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7860 	}
   7861 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7862 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7863 
   7864 	if (vlan_has_tag(m0)) {
   7865 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7866 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7867 		*cmdlenp |= NQTX_CMD_VLE;
   7868 	}
   7869 
   7870 	mssidx = 0;
   7871 
   7872 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7873 		int hlen = offset + iphl;
   7874 		int tcp_hlen;
   7875 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7876 
   7877 		if (__predict_false(m0->m_len <
   7878 				    (hlen + sizeof(struct tcphdr)))) {
   7879 			/*
   7880 			 * TCP/IP headers are not in the first mbuf; we need
   7881 			 * to do this the slow and painful way. Let's just
   7882 			 * hope this doesn't happen very often.
   7883 			 */
   7884 			struct tcphdr th;
   7885 
   7886 			WM_Q_EVCNT_INCR(txq, tsopain);
   7887 
   7888 			m_copydata(m0, hlen, sizeof(th), &th);
   7889 			if (v4) {
   7890 				struct ip ip;
   7891 
   7892 				m_copydata(m0, offset, sizeof(ip), &ip);
   7893 				ip.ip_len = 0;
   7894 				m_copyback(m0,
   7895 				    offset + offsetof(struct ip, ip_len),
   7896 				    sizeof(ip.ip_len), &ip.ip_len);
   7897 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7898 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7899 			} else {
   7900 				struct ip6_hdr ip6;
   7901 
   7902 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7903 				ip6.ip6_plen = 0;
   7904 				m_copyback(m0,
   7905 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7906 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7907 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7908 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7909 			}
   7910 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7911 			    sizeof(th.th_sum), &th.th_sum);
   7912 
   7913 			tcp_hlen = th.th_off << 2;
   7914 		} else {
   7915 			/*
   7916 			 * TCP/IP headers are in the first mbuf; we can do
   7917 			 * this the easy way.
   7918 			 */
   7919 			struct tcphdr *th;
   7920 
   7921 			if (v4) {
   7922 				struct ip *ip =
   7923 				    (void *)(mtod(m0, char *) + offset);
   7924 				th = (void *)(mtod(m0, char *) + hlen);
   7925 
   7926 				ip->ip_len = 0;
   7927 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7928 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7929 			} else {
   7930 				struct ip6_hdr *ip6 =
   7931 				    (void *)(mtod(m0, char *) + offset);
   7932 				th = (void *)(mtod(m0, char *) + hlen);
   7933 
   7934 				ip6->ip6_plen = 0;
   7935 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7936 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7937 			}
   7938 			tcp_hlen = th->th_off << 2;
   7939 		}
   7940 		hlen += tcp_hlen;
   7941 		*cmdlenp |= NQTX_CMD_TSE;
   7942 
   7943 		if (v4) {
   7944 			WM_Q_EVCNT_INCR(txq, tso);
   7945 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7946 		} else {
   7947 			WM_Q_EVCNT_INCR(txq, tso6);
   7948 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7949 		}
   7950 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7951 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7952 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7953 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7954 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7955 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7956 	} else {
   7957 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7958 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7959 	}
   7960 
   7961 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7962 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7963 		cmdc |= NQTXC_CMD_IP4;
   7964 	}
   7965 
   7966 	if (m0->m_pkthdr.csum_flags &
   7967 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7968 		WM_Q_EVCNT_INCR(txq, tusum);
   7969 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7970 			cmdc |= NQTXC_CMD_TCP;
   7971 		else
   7972 			cmdc |= NQTXC_CMD_UDP;
   7973 
   7974 		cmdc |= NQTXC_CMD_IP4;
   7975 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7976 	}
   7977 	if (m0->m_pkthdr.csum_flags &
   7978 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7979 		WM_Q_EVCNT_INCR(txq, tusum6);
   7980 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7981 			cmdc |= NQTXC_CMD_TCP;
   7982 		else
   7983 			cmdc |= NQTXC_CMD_UDP;
   7984 
   7985 		cmdc |= NQTXC_CMD_IP6;
   7986 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7987 	}
   7988 
    7989 	/*
    7990 	 * We don't have to write a context descriptor for every packet on
    7991 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7992 	 * I210 and I211. For these controllers it is enough to write one
    7993 	 * context descriptor per Tx queue.
    7994 	 * Writing a context descriptor for every packet adds some overhead,
    7995 	 * but it does not cause problems.
    7996 	 */
   7997 	/* Fill in the context descriptor. */
    7998 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
    7999 	    htole32(vl_len);
    8000 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
    8001 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
    8002 	    htole32(cmdc);
    8003 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
    8004 	    htole32(mssidx);
   8005 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8006 	DPRINTF(WM_DEBUG_TX,
   8007 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8008 		txq->txq_next, 0, vl_len));
   8009 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8010 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8011 	txs->txs_ndesc++;
   8012 	return 0;
   8013 }
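
         /*
          * Editor's illustrative sketch (not driver code): the context
          * descriptor built above packs the MAC header length, IP header
          * length and VLAN tag into a single 32-bit word with the
          * NQTXC_VLLEN_* shifts.  With example values (a plain Ethernet +
          * IPv4 header and a hypothetical VLAN ID):
          *
          *	uint32_t vl_len = 0;
          *	int maclen = ETHER_HDR_LEN;	(14 bytes, no VLAN encap)
          *	int iphl = 20;			(IPv4 header, no options)
          *	uint16_t vtag = 100;		(hypothetical VLAN ID)
          *
          *	vl_len |= maclen << NQTXC_VLLEN_MACLEN_SHIFT;
          *	vl_len |= iphl << NQTXC_VLLEN_IPLEN_SHIFT;
          *	vl_len |= (vtag & NQTXC_VLLEN_VLAN_MASK)
          *	    << NQTXC_VLLEN_VLAN_SHIFT;
          *
          * The KASSERTs in wm_nq_tx_offload() verify that each value fits
          * within its field mask before it is shifted into place.
          */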
   8014 
   8015 /*
   8016  * wm_nq_start:		[ifnet interface function]
   8017  *
   8018  *	Start packet transmission on the interface for NEWQUEUE devices
   8019  */
   8020 static void
   8021 wm_nq_start(struct ifnet *ifp)
   8022 {
   8023 	struct wm_softc *sc = ifp->if_softc;
   8024 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8025 
   8026 #ifdef WM_MPSAFE
   8027 	KASSERT(if_is_mpsafe(ifp));
   8028 #endif
   8029 	/*
   8030 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8031 	 */
   8032 
   8033 	mutex_enter(txq->txq_lock);
   8034 	if (!txq->txq_stopping)
   8035 		wm_nq_start_locked(ifp);
   8036 	mutex_exit(txq->txq_lock);
   8037 }
   8038 
   8039 static void
   8040 wm_nq_start_locked(struct ifnet *ifp)
   8041 {
   8042 	struct wm_softc *sc = ifp->if_softc;
   8043 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8044 
   8045 	wm_nq_send_common_locked(ifp, txq, false);
   8046 }
   8047 
   8048 static int
   8049 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8050 {
   8051 	int qid;
   8052 	struct wm_softc *sc = ifp->if_softc;
   8053 	struct wm_txqueue *txq;
   8054 
   8055 	qid = wm_select_txqueue(ifp, m);
   8056 	txq = &sc->sc_queue[qid].wmq_txq;
   8057 
   8058 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8059 		m_freem(m);
   8060 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8061 		return ENOBUFS;
   8062 	}
   8063 
   8064 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   8065 	ifp->if_obytes += m->m_pkthdr.len;
   8066 	if (m->m_flags & M_MCAST)
   8067 		ifp->if_omcasts++;
   8068 
   8069 	/*
    8070 	 * There are two situations in which this mutex_tryenter() can
    8071 	 * fail at run time:
    8072 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8073 	 *     (2) contention with the deferred if_start softint
    8074 	 *         (wm_handle_queue())
    8075 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8076 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8077 	 * In case (2), it is likewise dequeued by
    8078 	 * wm_deferred_start_locked(), so it does not get stuck either.
   8079 	 */
   8080 	if (mutex_tryenter(txq->txq_lock)) {
   8081 		if (!txq->txq_stopping)
   8082 			wm_nq_transmit_locked(ifp, txq);
   8083 		mutex_exit(txq->txq_lock);
   8084 	}
   8085 
   8086 	return 0;
   8087 }
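
         /*
          * Editor's illustrative sketch (not driver code): wm_nq_transmit()
          * uses a common MP-safe if_transmit() shape: enqueue onto a
          * lock-free pcq(9) first, then opportunistically try the queue
          * lock and drain.  In isolation, with hypothetical q/lock/drain():
          *
          *	if (!pcq_put(q, m)) {
          *		m_freem(m);		(producer side is lock-free)
          *		return ENOBUFS;
          *	}
          *	if (mutex_tryenter(lock)) {	(drain only if uncontended)
          *		drain(q);
          *		mutex_exit(lock);
          *	}
          *	return 0;
          *
          * A failed mutex_tryenter() is harmless here because, as the
          * comment above explains, whoever currently holds the lock will
          * dequeue the packet that was just enqueued.
          */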
   8088 
   8089 static void
   8090 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8091 {
   8092 
   8093 	wm_nq_send_common_locked(ifp, txq, true);
   8094 }
   8095 
   8096 static void
   8097 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8098     bool is_transmit)
   8099 {
   8100 	struct wm_softc *sc = ifp->if_softc;
   8101 	struct mbuf *m0;
   8102 	struct wm_txsoft *txs;
   8103 	bus_dmamap_t dmamap;
   8104 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8105 	bool do_csum, sent;
   8106 	bool remap = true;
   8107 
   8108 	KASSERT(mutex_owned(txq->txq_lock));
   8109 
   8110 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8111 		return;
   8112 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8113 		return;
   8114 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8115 		return;
   8116 
   8117 	sent = false;
   8118 
   8119 	/*
   8120 	 * Loop through the send queue, setting up transmit descriptors
   8121 	 * until we drain the queue, or use up all available transmit
   8122 	 * descriptors.
   8123 	 */
   8124 	for (;;) {
   8125 		m0 = NULL;
   8126 
   8127 		/* Get a work queue entry. */
   8128 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8129 			wm_txeof(txq, UINT_MAX);
   8130 			if (txq->txq_sfree == 0) {
   8131 				DPRINTF(WM_DEBUG_TX,
   8132 				    ("%s: TX: no free job descriptors\n",
   8133 					device_xname(sc->sc_dev)));
   8134 				WM_Q_EVCNT_INCR(txq, txsstall);
   8135 				break;
   8136 			}
   8137 		}
   8138 
   8139 		/* Grab a packet off the queue. */
   8140 		if (is_transmit)
   8141 			m0 = pcq_get(txq->txq_interq);
   8142 		else
   8143 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8144 		if (m0 == NULL)
   8145 			break;
   8146 
   8147 		DPRINTF(WM_DEBUG_TX,
   8148 		    ("%s: TX: have packet to transmit: %p\n",
   8149 		    device_xname(sc->sc_dev), m0));
   8150 
   8151 		txs = &txq->txq_soft[txq->txq_snext];
   8152 		dmamap = txs->txs_dmamap;
   8153 
   8154 		/*
   8155 		 * Load the DMA map.  If this fails, the packet either
   8156 		 * didn't fit in the allotted number of segments, or we
   8157 		 * were short on resources.  For the too-many-segments
   8158 		 * case, we simply report an error and drop the packet,
   8159 		 * since we can't sanely copy a jumbo packet to a single
   8160 		 * buffer.
   8161 		 */
   8162 retry:
   8163 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8164 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8165 		if (__predict_false(error)) {
   8166 			if (error == EFBIG) {
   8167 				if (remap == true) {
   8168 					struct mbuf *m;
   8169 
   8170 					remap = false;
   8171 					m = m_defrag(m0, M_NOWAIT);
   8172 					if (m != NULL) {
   8173 						WM_Q_EVCNT_INCR(txq, defrag);
   8174 						m0 = m;
   8175 						goto retry;
   8176 					}
   8177 				}
   8178 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8179 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8180 				    "DMA segments, dropping...\n",
   8181 				    device_xname(sc->sc_dev));
   8182 				wm_dump_mbuf_chain(sc, m0);
   8183 				m_freem(m0);
   8184 				continue;
   8185 			}
   8186 			/* Short on resources, just stop for now. */
   8187 			DPRINTF(WM_DEBUG_TX,
   8188 			    ("%s: TX: dmamap load failed: %d\n",
   8189 				device_xname(sc->sc_dev), error));
   8190 			break;
   8191 		}
   8192 
   8193 		segs_needed = dmamap->dm_nsegs;
   8194 
   8195 		/*
   8196 		 * Ensure we have enough descriptors free to describe
   8197 		 * the packet. Note, we always reserve one descriptor
   8198 		 * at the end of the ring due to the semantics of the
   8199 		 * TDT register, plus one more in the event we need
   8200 		 * to load offload context.
   8201 		 */
   8202 		if (segs_needed > txq->txq_free - 2) {
   8203 			/*
   8204 			 * Not enough free descriptors to transmit this
   8205 			 * packet.  We haven't committed anything yet,
   8206 			 * so just unload the DMA map, put the packet
    8207 			 * back on the queue, and punt. Notify the upper
   8208 			 * layer that there are no more slots left.
   8209 			 */
   8210 			DPRINTF(WM_DEBUG_TX,
   8211 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8212 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8213 				segs_needed, txq->txq_free - 1));
   8214 			if (!is_transmit)
   8215 				ifp->if_flags |= IFF_OACTIVE;
   8216 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8217 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8218 			WM_Q_EVCNT_INCR(txq, txdstall);
   8219 			break;
   8220 		}
   8221 
   8222 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8223 
   8224 		DPRINTF(WM_DEBUG_TX,
   8225 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8226 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8227 
   8228 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8229 
   8230 		/*
   8231 		 * Store a pointer to the packet so that we can free it
   8232 		 * later.
   8233 		 *
    8234 		 * Initially, we take the number of descriptors the
    8235 		 * packet uses to be the number of DMA segments.  This may be
   8236 		 * incremented by 1 if we do checksum offload (a descriptor
   8237 		 * is used to set the checksum context).
   8238 		 */
   8239 		txs->txs_mbuf = m0;
   8240 		txs->txs_firstdesc = txq->txq_next;
   8241 		txs->txs_ndesc = segs_needed;
   8242 
   8243 		/* Set up offload parameters for this packet. */
   8244 		uint32_t cmdlen, fields, dcmdlen;
   8245 		if (m0->m_pkthdr.csum_flags &
   8246 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8247 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8248 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8249 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8250 			    &do_csum) != 0) {
   8251 				/* Error message already displayed. */
   8252 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8253 				continue;
   8254 			}
   8255 		} else {
   8256 			do_csum = false;
   8257 			cmdlen = 0;
   8258 			fields = 0;
   8259 		}
   8260 
   8261 		/* Sync the DMA map. */
   8262 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8263 		    BUS_DMASYNC_PREWRITE);
   8264 
   8265 		/* Initialize the first transmit descriptor. */
   8266 		nexttx = txq->txq_next;
   8267 		if (!do_csum) {
   8268 			/* Setup a legacy descriptor */
   8269 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8270 			    dmamap->dm_segs[0].ds_addr);
   8271 			txq->txq_descs[nexttx].wtx_cmdlen =
   8272 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8273 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8274 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8275 			if (vlan_has_tag(m0)) {
   8276 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8277 				    htole32(WTX_CMD_VLE);
   8278 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8279 				    htole16(vlan_get_tag(m0));
   8280 			} else
    8281 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8282 
   8283 			dcmdlen = 0;
   8284 		} else {
   8285 			/* Setup an advanced data descriptor */
   8286 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8287 			    htole64(dmamap->dm_segs[0].ds_addr);
   8288 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8289 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8290 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8291 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8292 			    htole32(fields);
   8293 			DPRINTF(WM_DEBUG_TX,
   8294 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8295 				device_xname(sc->sc_dev), nexttx,
   8296 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8297 			DPRINTF(WM_DEBUG_TX,
   8298 			    ("\t 0x%08x%08x\n", fields,
   8299 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8300 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8301 		}
   8302 
   8303 		lasttx = nexttx;
   8304 		nexttx = WM_NEXTTX(txq, nexttx);
   8305 		/*
    8306 		 * Fill in the next descriptors. The legacy and advanced
    8307 		 * formats are the same from here on.
   8308 		 */
   8309 		for (seg = 1; seg < dmamap->dm_nsegs;
   8310 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8311 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8312 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8313 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8314 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8315 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8316 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8317 			lasttx = nexttx;
   8318 
   8319 			DPRINTF(WM_DEBUG_TX,
   8320 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8321 				device_xname(sc->sc_dev), nexttx,
   8322 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8323 				dmamap->dm_segs[seg].ds_len));
   8324 		}
   8325 
   8326 		KASSERT(lasttx != -1);
   8327 
   8328 		/*
   8329 		 * Set up the command byte on the last descriptor of
   8330 		 * the packet. If we're in the interrupt delay window,
   8331 		 * delay the interrupt.
   8332 		 */
   8333 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8334 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8335 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8336 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8337 
   8338 		txs->txs_lastdesc = lasttx;
   8339 
   8340 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8341 		    device_xname(sc->sc_dev),
   8342 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8343 
   8344 		/* Sync the descriptors we're using. */
   8345 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8346 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8347 
   8348 		/* Give the packet to the chip. */
   8349 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8350 		sent = true;
   8351 
   8352 		DPRINTF(WM_DEBUG_TX,
   8353 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8354 
   8355 		DPRINTF(WM_DEBUG_TX,
   8356 		    ("%s: TX: finished transmitting packet, job %d\n",
   8357 			device_xname(sc->sc_dev), txq->txq_snext));
   8358 
   8359 		/* Advance the tx pointer. */
   8360 		txq->txq_free -= txs->txs_ndesc;
   8361 		txq->txq_next = nexttx;
   8362 
   8363 		txq->txq_sfree--;
   8364 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8365 
   8366 		/* Pass the packet to any BPF listeners. */
   8367 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8368 	}
   8369 
   8370 	if (m0 != NULL) {
   8371 		if (!is_transmit)
   8372 			ifp->if_flags |= IFF_OACTIVE;
   8373 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8374 		WM_Q_EVCNT_INCR(txq, descdrop);
   8375 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8376 			__func__));
   8377 		m_freem(m0);
   8378 	}
   8379 
   8380 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8381 		/* No more slots; notify upper layer. */
   8382 		if (!is_transmit)
   8383 			ifp->if_flags |= IFF_OACTIVE;
   8384 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8385 	}
   8386 
   8387 	if (sent) {
   8388 		/* Set a watchdog timer in case the chip flakes out. */
   8389 		txq->txq_lastsent = time_uptime;
   8390 		txq->txq_sending = true;
   8391 	}
   8392 }
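
         /*
          * Editor's illustrative sketch (not driver code): the EFBIG
          * handling in the send loops above is the usual "defragment once,
          * then give up" pattern for bus_dmamap_load_mbuf(9).  Reduced to
          * its core:
          *
          *	bool remap = true;
          * again:
          *	error = bus_dmamap_load_mbuf(tag, map, m0,
          *	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
          *	if (error == EFBIG && remap) {
          *		struct mbuf *m = m_defrag(m0, M_NOWAIT);
          *
          *		remap = false;		(only one defrag attempt)
          *		if (m != NULL) {
          *			m0 = m;
          *			goto again;
          *		}
          *	}
          *
          * m_defrag(9) copies the chain into as few mbufs as it can, so a
          * second EFBIG means the packet genuinely needs too many DMA
          * segments and is dropped rather than copied to a bounce buffer.
          */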
   8393 
   8394 static void
   8395 wm_deferred_start_locked(struct wm_txqueue *txq)
   8396 {
   8397 	struct wm_softc *sc = txq->txq_sc;
   8398 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8399 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8400 	int qid = wmq->wmq_id;
   8401 
   8402 	KASSERT(mutex_owned(txq->txq_lock));
   8403 
   8404 	if (txq->txq_stopping) {
   8405 		mutex_exit(txq->txq_lock);
   8406 		return;
   8407 	}
   8408 
   8409 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8410 		/* XXX needed for ALTQ or single-CPU systems */
   8411 		if (qid == 0)
   8412 			wm_nq_start_locked(ifp);
   8413 		wm_nq_transmit_locked(ifp, txq);
   8414 	} else {
    8415 		/* XXX needed for ALTQ or single-CPU systems */
   8416 		if (qid == 0)
   8417 			wm_start_locked(ifp);
   8418 		wm_transmit_locked(ifp, txq);
   8419 	}
   8420 }
   8421 
   8422 /* Interrupt */
   8423 
   8424 /*
   8425  * wm_txeof:
   8426  *
   8427  *	Helper; handle transmit interrupts.
   8428  */
   8429 static bool
   8430 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8431 {
   8432 	struct wm_softc *sc = txq->txq_sc;
   8433 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8434 	struct wm_txsoft *txs;
   8435 	int count = 0;
   8436 	int i;
   8437 	uint8_t status;
   8438 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8439 	bool more = false;
   8440 
   8441 	KASSERT(mutex_owned(txq->txq_lock));
   8442 
   8443 	if (txq->txq_stopping)
   8444 		return false;
   8445 
   8446 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8447 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8448 	if (wmq->wmq_id == 0)
   8449 		ifp->if_flags &= ~IFF_OACTIVE;
   8450 
   8451 	/*
   8452 	 * Go through the Tx list and free mbufs for those
   8453 	 * frames which have been transmitted.
   8454 	 */
   8455 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8456 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8457 		if (limit-- == 0) {
   8458 			more = true;
   8459 			DPRINTF(WM_DEBUG_TX,
   8460 			    ("%s: TX: loop limited, job %d is not processed\n",
   8461 				device_xname(sc->sc_dev), i));
   8462 			break;
   8463 		}
   8464 
   8465 		txs = &txq->txq_soft[i];
   8466 
   8467 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8468 			device_xname(sc->sc_dev), i));
   8469 
   8470 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8471 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8472 
   8473 		status =
   8474 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8475 		if ((status & WTX_ST_DD) == 0) {
   8476 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8477 			    BUS_DMASYNC_PREREAD);
   8478 			break;
   8479 		}
   8480 
   8481 		count++;
   8482 		DPRINTF(WM_DEBUG_TX,
   8483 		    ("%s: TX: job %d done: descs %d..%d\n",
   8484 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8485 		    txs->txs_lastdesc));
   8486 
   8487 		/*
   8488 		 * XXX We should probably be using the statistics
   8489 		 * XXX registers, but I don't know if they exist
   8490 		 * XXX on chips before the i82544.
   8491 		 */
   8492 
   8493 #ifdef WM_EVENT_COUNTERS
   8494 		if (status & WTX_ST_TU)
   8495 			WM_Q_EVCNT_INCR(txq, underrun);
   8496 #endif /* WM_EVENT_COUNTERS */
   8497 
   8498 		/*
    8499 		 * Documents for 82574 and newer say the status field has
    8500 		 * neither an EC (Excessive Collision) bit nor an LC (Late
    8501 		 * Collision) bit; both are reserved. Refer to the "PCIe GbE
    8502 		 * Controller Open Source Software Developer's Manual" and
    8503 		 * the 82574 and newer datasheets.
    8504 		 *
    8505 		 * XXX I saw the LC bit set on I218 even though the media was
    8506 		 * full duplex, so the bit might have some other meaning
    8507 		 * (I have no document).
   8507 		 */
   8508 
   8509 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8510 		    && ((sc->sc_type < WM_T_82574)
   8511 			|| (sc->sc_type == WM_T_80003))) {
   8512 			ifp->if_oerrors++;
   8513 			if (status & WTX_ST_LC)
   8514 				log(LOG_WARNING, "%s: late collision\n",
   8515 				    device_xname(sc->sc_dev));
   8516 			else if (status & WTX_ST_EC) {
   8517 				ifp->if_collisions +=
   8518 				    TX_COLLISION_THRESHOLD + 1;
   8519 				log(LOG_WARNING, "%s: excessive collisions\n",
   8520 				    device_xname(sc->sc_dev));
   8521 			}
   8522 		} else
   8523 			ifp->if_opackets++;
   8524 
   8525 		txq->txq_packets++;
   8526 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8527 
   8528 		txq->txq_free += txs->txs_ndesc;
   8529 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8530 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8531 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8532 		m_freem(txs->txs_mbuf);
   8533 		txs->txs_mbuf = NULL;
   8534 	}
   8535 
   8536 	/* Update the dirty transmit buffer pointer. */
   8537 	txq->txq_sdirty = i;
   8538 	DPRINTF(WM_DEBUG_TX,
   8539 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8540 
   8541 	/*
   8542 	 * If there are no more pending transmissions, cancel the watchdog
   8543 	 * timer.
   8544 	 */
   8545 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8546 		txq->txq_sending = false;
   8547 
   8548 	return more;
   8549 }
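
         /*
          * Editor's illustrative sketch (pseudo-C with hypothetical
          * helpers, not driver code): wm_txeof() is a classic
          * descriptor-done scan.  Because WTX_CMD_RS was set on the last
          * descriptor of each packet, the device writes WTX_ST_DD into that
          * descriptor's status when it has finished with the whole packet,
          * so the reap loop can stop at the first job still pending:
          *
          *	for (i = first_dirty; jobs_outstanding(); i = next(i)) {
          *		if (limit-- == 0)
          *			break;		(bounded work per call)
          *		if ((status(job_lastdesc(i)) & WTX_ST_DD) == 0)
          *			break;		(hardware not done yet)
          *		unload_and_free(i);
          *	}
          *
          * The limit argument caps how many jobs one call may reap so a
          * busy queue cannot monopolize the interrupt handler; the "more"
          * return value tells the caller to continue in a softint.
          */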
   8550 
   8551 static inline uint32_t
   8552 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8553 {
   8554 	struct wm_softc *sc = rxq->rxq_sc;
   8555 
   8556 	if (sc->sc_type == WM_T_82574)
   8557 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8558 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8559 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8560 	else
   8561 		return rxq->rxq_descs[idx].wrx_status;
   8562 }
   8563 
   8564 static inline uint32_t
   8565 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8566 {
   8567 	struct wm_softc *sc = rxq->rxq_sc;
   8568 
   8569 	if (sc->sc_type == WM_T_82574)
   8570 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8571 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8572 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8573 	else
   8574 		return rxq->rxq_descs[idx].wrx_errors;
   8575 }
   8576 
   8577 static inline uint16_t
   8578 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8579 {
   8580 	struct wm_softc *sc = rxq->rxq_sc;
   8581 
   8582 	if (sc->sc_type == WM_T_82574)
   8583 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8584 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8585 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8586 	else
   8587 		return rxq->rxq_descs[idx].wrx_special;
   8588 }
   8589 
   8590 static inline int
   8591 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8592 {
   8593 	struct wm_softc *sc = rxq->rxq_sc;
   8594 
   8595 	if (sc->sc_type == WM_T_82574)
   8596 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8597 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8598 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8599 	else
   8600 		return rxq->rxq_descs[idx].wrx_len;
   8601 }
   8602 
   8603 #ifdef WM_DEBUG
   8604 static inline uint32_t
   8605 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8606 {
   8607 	struct wm_softc *sc = rxq->rxq_sc;
   8608 
   8609 	if (sc->sc_type == WM_T_82574)
   8610 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8611 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8612 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8613 	else
   8614 		return 0;
   8615 }
   8616 
   8617 static inline uint8_t
   8618 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8619 {
   8620 	struct wm_softc *sc = rxq->rxq_sc;
   8621 
   8622 	if (sc->sc_type == WM_T_82574)
   8623 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8624 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8625 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8626 	else
   8627 		return 0;
   8628 }
   8629 #endif /* WM_DEBUG */
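
         /*
          * Editor's note with a usage sketch: the wm_rxdesc_get_*() and
          * wm_rxdesc_is_set_*() helpers above hide the fact that the same
          * ring memory is viewed through three descriptor layouts: legacy
          * (wrx_*), 82574 extended (erx_*) and NEWQUEUE advanced (nqrx_*).
          * A caller passes the candidate bit for every layout and the
          * helper tests the right one, e.g. an end-of-packet check as done
          * in wm_rxdesc_is_eop() below:
          *
          *	if (wm_rxdesc_is_set_status(sc, status,
          *	    WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
          *		...
          *
          * which keeps the receive path itself free of per-chip
          * conditionals.
          */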
   8630 
   8631 static inline bool
   8632 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8633     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8634 {
   8635 
   8636 	if (sc->sc_type == WM_T_82574)
   8637 		return (status & ext_bit) != 0;
   8638 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8639 		return (status & nq_bit) != 0;
   8640 	else
   8641 		return (status & legacy_bit) != 0;
   8642 }
   8643 
   8644 static inline bool
   8645 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8646     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8647 {
   8648 
   8649 	if (sc->sc_type == WM_T_82574)
   8650 		return (error & ext_bit) != 0;
   8651 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8652 		return (error & nq_bit) != 0;
   8653 	else
   8654 		return (error & legacy_bit) != 0;
   8655 }
   8656 
   8657 static inline bool
   8658 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8659 {
   8660 
   8661 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8662 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8663 		return true;
   8664 	else
   8665 		return false;
   8666 }
   8667 
   8668 static inline bool
   8669 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8670 {
   8671 	struct wm_softc *sc = rxq->rxq_sc;
   8672 
   8673 	/* XXX missing error bit for newqueue? */
   8674 	if (wm_rxdesc_is_set_error(sc, errors,
   8675 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8676 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8677 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8678 		NQRXC_ERROR_RXE)) {
   8679 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8680 		    EXTRXC_ERROR_SE, 0))
   8681 			log(LOG_WARNING, "%s: symbol error\n",
   8682 			    device_xname(sc->sc_dev));
   8683 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8684 		    EXTRXC_ERROR_SEQ, 0))
   8685 			log(LOG_WARNING, "%s: receive sequence error\n",
   8686 			    device_xname(sc->sc_dev));
   8687 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8688 		    EXTRXC_ERROR_CE, 0))
   8689 			log(LOG_WARNING, "%s: CRC error\n",
   8690 			    device_xname(sc->sc_dev));
   8691 		return true;
   8692 	}
   8693 
   8694 	return false;
   8695 }
   8696 
   8697 static inline bool
   8698 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8699 {
   8700 	struct wm_softc *sc = rxq->rxq_sc;
   8701 
   8702 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8703 		NQRXC_STATUS_DD)) {
   8704 		/* We have processed all of the receive descriptors. */
   8705 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8706 		return false;
   8707 	}
   8708 
   8709 	return true;
   8710 }
   8711 
   8712 static inline bool
   8713 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8714     uint16_t vlantag, struct mbuf *m)
   8715 {
   8716 
   8717 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8718 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8719 		vlan_set_tag(m, le16toh(vlantag));
   8720 	}
   8721 
   8722 	return true;
   8723 }
   8724 
   8725 static inline void
   8726 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8727     uint32_t errors, struct mbuf *m)
   8728 {
   8729 	struct wm_softc *sc = rxq->rxq_sc;
   8730 
   8731 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8732 		if (wm_rxdesc_is_set_status(sc, status,
   8733 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8734 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8735 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8736 			if (wm_rxdesc_is_set_error(sc, errors,
   8737 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8738 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8739 		}
   8740 		if (wm_rxdesc_is_set_status(sc, status,
   8741 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8742 			/*
   8743 			 * Note: we don't know if this was TCP or UDP,
   8744 			 * so we just set both bits, and expect the
   8745 			 * upper layers to deal.
   8746 			 */
   8747 			WM_Q_EVCNT_INCR(rxq, tusum);
   8748 			m->m_pkthdr.csum_flags |=
   8749 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8750 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8751 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8752 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8753 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8754 		}
   8755 	}
   8756 }
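
         /*
          * Editor's illustrative sketch (not driver code): the M_CSUM_*
          * flags set above are consumed later by the protocol input path,
          * which trusts the hardware verdict instead of recomputing the
          * checksum in software, along the lines of:
          *
          *	int flags = m->m_pkthdr.csum_flags;
          *
          *	if (flags & M_CSUM_IPv4) {
          *		if (flags & M_CSUM_IPv4_BAD)
          *			goto drop;	(hardware saw a bad sum)
          *		(otherwise the software checksum can be skipped)
          *	}
          *
          * Setting both the TCP and UDP bits when the hardware cannot
          * distinguish them is safe because each upper layer only examines
          * the bit for the protocol it is actually parsing.
          */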
   8757 
   8758 /*
   8759  * wm_rxeof:
   8760  *
   8761  *	Helper; handle receive interrupts.
   8762  */
   8763 static bool
   8764 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8765 {
   8766 	struct wm_softc *sc = rxq->rxq_sc;
   8767 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8768 	struct wm_rxsoft *rxs;
   8769 	struct mbuf *m;
   8770 	int i, len;
   8771 	int count = 0;
   8772 	uint32_t status, errors;
   8773 	uint16_t vlantag;
   8774 	bool more = false;
   8775 
   8776 	KASSERT(mutex_owned(rxq->rxq_lock));
   8777 
   8778 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8779 		if (limit-- == 0) {
   8780 			rxq->rxq_ptr = i;
   8781 			more = true;
   8782 			DPRINTF(WM_DEBUG_RX,
   8783 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8784 				device_xname(sc->sc_dev), i));
   8785 			break;
   8786 		}
   8787 
   8788 		rxs = &rxq->rxq_soft[i];
   8789 
   8790 		DPRINTF(WM_DEBUG_RX,
   8791 		    ("%s: RX: checking descriptor %d\n",
   8792 			device_xname(sc->sc_dev), i));
   8793 		wm_cdrxsync(rxq, i,
   8794 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8795 
   8796 		status = wm_rxdesc_get_status(rxq, i);
   8797 		errors = wm_rxdesc_get_errors(rxq, i);
   8798 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8799 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8800 #ifdef WM_DEBUG
   8801 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8802 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8803 #endif
   8804 
   8805 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8806 			/*
    8807 			 * Update the receive pointer while holding rxq_lock
    8808 			 * so that it stays consistent with the counters.
   8809 			 */
   8810 			rxq->rxq_ptr = i;
   8811 			break;
   8812 		}
   8813 
   8814 		count++;
   8815 		if (__predict_false(rxq->rxq_discard)) {
   8816 			DPRINTF(WM_DEBUG_RX,
   8817 			    ("%s: RX: discarding contents of descriptor %d\n",
   8818 				device_xname(sc->sc_dev), i));
   8819 			wm_init_rxdesc(rxq, i);
   8820 			if (wm_rxdesc_is_eop(rxq, status)) {
   8821 				/* Reset our state. */
   8822 				DPRINTF(WM_DEBUG_RX,
   8823 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8824 					device_xname(sc->sc_dev)));
   8825 				rxq->rxq_discard = 0;
   8826 			}
   8827 			continue;
   8828 		}
   8829 
   8830 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8831 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8832 
   8833 		m = rxs->rxs_mbuf;
   8834 
   8835 		/*
   8836 		 * Add a new receive buffer to the ring, unless of
   8837 		 * course the length is zero. Treat the latter as a
   8838 		 * failed mapping.
   8839 		 */
   8840 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8841 			/*
   8842 			 * Failed, throw away what we've done so
   8843 			 * far, and discard the rest of the packet.
   8844 			 */
   8845 			ifp->if_ierrors++;
   8846 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8847 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8848 			wm_init_rxdesc(rxq, i);
   8849 			if (!wm_rxdesc_is_eop(rxq, status))
   8850 				rxq->rxq_discard = 1;
   8851 			if (rxq->rxq_head != NULL)
   8852 				m_freem(rxq->rxq_head);
   8853 			WM_RXCHAIN_RESET(rxq);
   8854 			DPRINTF(WM_DEBUG_RX,
   8855 			    ("%s: RX: Rx buffer allocation failed, "
   8856 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8857 				rxq->rxq_discard ? " (discard)" : ""));
   8858 			continue;
   8859 		}
   8860 
   8861 		m->m_len = len;
   8862 		rxq->rxq_len += len;
   8863 		DPRINTF(WM_DEBUG_RX,
   8864 		    ("%s: RX: buffer at %p len %d\n",
   8865 			device_xname(sc->sc_dev), m->m_data, len));
   8866 
   8867 		/* If this is not the end of the packet, keep looking. */
   8868 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8869 			WM_RXCHAIN_LINK(rxq, m);
   8870 			DPRINTF(WM_DEBUG_RX,
   8871 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8872 				device_xname(sc->sc_dev), rxq->rxq_len));
   8873 			continue;
   8874 		}
   8875 
   8876 		/*
    8877 		 * Okay, we have the entire packet now. The chip is
    8878 		 * configured to include the FCS except on I350, I354
    8879 		 * and I21[01] (not all chips can be configured to
    8880 		 * strip it), so we need to trim it. We may also need
    8881 		 * to adjust the length of the previous mbuf in the
    8882 		 * chain if the current mbuf is too short.
    8883 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8884 		 * register is always set on I350, so we don't trim there.
   8885 		 */
   8886 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8887 		    && (sc->sc_type != WM_T_I210)
   8888 		    && (sc->sc_type != WM_T_I211)) {
   8889 			if (m->m_len < ETHER_CRC_LEN) {
   8890 				rxq->rxq_tail->m_len
   8891 				    -= (ETHER_CRC_LEN - m->m_len);
   8892 				m->m_len = 0;
   8893 			} else
   8894 				m->m_len -= ETHER_CRC_LEN;
   8895 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8896 		} else
   8897 			len = rxq->rxq_len;
   8898 
   8899 		WM_RXCHAIN_LINK(rxq, m);
   8900 
   8901 		*rxq->rxq_tailp = NULL;
   8902 		m = rxq->rxq_head;
   8903 
   8904 		WM_RXCHAIN_RESET(rxq);
   8905 
   8906 		DPRINTF(WM_DEBUG_RX,
   8907 		    ("%s: RX: have entire packet, len -> %d\n",
   8908 			device_xname(sc->sc_dev), len));
   8909 
   8910 		/* If an error occurred, update stats and drop the packet. */
   8911 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8912 			m_freem(m);
   8913 			continue;
   8914 		}
   8915 
   8916 		/* No errors.  Receive the packet. */
   8917 		m_set_rcvif(m, ifp);
   8918 		m->m_pkthdr.len = len;
   8919 		/*
    8920 		 * TODO:
    8921 		 * we should save the rsshash and rsstype in this mbuf.
   8922 		 */
   8923 		DPRINTF(WM_DEBUG_RX,
   8924 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8925 			device_xname(sc->sc_dev), rsstype, rsshash));
   8926 
   8927 		/*
   8928 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8929 		 * for us.  Associate the tag with the packet.
   8930 		 */
   8931 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8932 			continue;
   8933 
   8934 		/* Set up checksum info for this packet. */
   8935 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8936 		/*
    8937 		 * Update the receive pointer while holding rxq_lock so that
    8938 		 * it stays consistent with the counters.
   8939 		 */
   8940 		rxq->rxq_ptr = i;
   8941 		rxq->rxq_packets++;
   8942 		rxq->rxq_bytes += len;
   8943 		mutex_exit(rxq->rxq_lock);
   8944 
   8945 		/* Pass it on. */
   8946 		if_percpuq_enqueue(sc->sc_ipq, m);
   8947 
   8948 		mutex_enter(rxq->rxq_lock);
   8949 
   8950 		if (rxq->rxq_stopping)
   8951 			break;
   8952 	}
   8953 
   8954 	DPRINTF(WM_DEBUG_RX,
   8955 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8956 
   8957 	return more;
   8958 }
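
         /*
          * Editor's worked example (hypothetical lengths, not driver code):
          * the FCS trim in wm_rxeof() must cope with a 4-byte CRC that
          * straddles the last two mbufs of a chain.  Suppose a 1515-byte
          * frame (including FCS) arrives with only its final byte in the
          * last mbuf:
          *
          *	prev->m_len = 1514;  tail->m_len = 1;  rxq_len = 1515;
          *
          *	(tail->m_len < ETHER_CRC_LEN, so:)
          *	prev->m_len -= ETHER_CRC_LEN - tail->m_len; (1514 - 3 = 1511)
          *	tail->m_len = 0;
          *	len = rxq_len - ETHER_CRC_LEN;		    (1515 - 4 = 1511)
          *
          * The shortfall is taken out of the previous mbuf, so the sum of
          * the mbuf lengths and the packet header length stay in agreement.
          */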
   8959 
   8960 /*
   8961  * wm_linkintr_gmii:
   8962  *
   8963  *	Helper; handle link interrupts for GMII.
   8964  */
   8965 static void
   8966 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8967 {
   8968 	device_t dev = sc->sc_dev;
   8969 	uint32_t status, reg;
   8970 	bool link;
   8971 	int rv;
   8972 
   8973 	KASSERT(WM_CORE_LOCKED(sc));
   8974 
   8975 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8976 		__func__));
   8977 
   8978 	if ((icr & ICR_LSC) == 0) {
   8979 		if (icr & ICR_RXSEQ)
   8980 			DPRINTF(WM_DEBUG_LINK,
   8981 			    ("%s: LINK Receive sequence error\n",
   8982 				device_xname(dev)));
   8983 		return;
   8984 	}
   8985 
   8986 	/* Link status changed */
   8987 	status = CSR_READ(sc, WMREG_STATUS);
   8988 	link = status & STATUS_LU;
   8989 	if (link) {
   8990 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8991 			device_xname(dev),
   8992 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8993 	} else {
   8994 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8995 			device_xname(dev)));
   8996 	}
   8997 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8998 		wm_gig_downshift_workaround_ich8lan(sc);
   8999 
   9000 	if ((sc->sc_type == WM_T_ICH8)
   9001 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9002 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9003 	}
   9004 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9005 		device_xname(dev)));
   9006 	mii_pollstat(&sc->sc_mii);
   9007 	if (sc->sc_type == WM_T_82543) {
   9008 		int miistatus, active;
   9009 
   9010 		/*
   9011 		 * With 82543, we need to force speed and
   9012 		 * duplex on the MAC equal to what the PHY
   9013 		 * speed and duplex configuration is.
   9014 		 */
   9015 		miistatus = sc->sc_mii.mii_media_status;
   9016 
   9017 		if (miistatus & IFM_ACTIVE) {
   9018 			active = sc->sc_mii.mii_media_active;
   9019 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9020 			switch (IFM_SUBTYPE(active)) {
   9021 			case IFM_10_T:
   9022 				sc->sc_ctrl |= CTRL_SPEED_10;
   9023 				break;
   9024 			case IFM_100_TX:
   9025 				sc->sc_ctrl |= CTRL_SPEED_100;
   9026 				break;
   9027 			case IFM_1000_T:
   9028 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9029 				break;
   9030 			default:
   9031 				/*
   9032 				 * Fiber?
    9033 				 * Should not enter here.
   9034 				 */
   9035 				device_printf(dev, "unknown media (%x)\n",
   9036 				    active);
   9037 				break;
   9038 			}
   9039 			if (active & IFM_FDX)
   9040 				sc->sc_ctrl |= CTRL_FD;
   9041 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9042 		}
   9043 	} else if (sc->sc_type == WM_T_PCH) {
   9044 		wm_k1_gig_workaround_hv(sc,
   9045 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9046 	}
   9047 
   9048 	/*
   9049 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9050 	 * aggressive resulting in many collisions. To avoid this, increase
   9051 	 * the IPG and reduce Rx latency in the PHY.
   9052 	 */
   9053 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9054 	    && link) {
   9055 		uint32_t tipg_reg;
   9056 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9057 		bool fdx;
   9058 		uint16_t emi_addr, emi_val;
   9059 
   9060 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9061 		tipg_reg &= ~TIPG_IPGT_MASK;
   9062 		fdx = status & STATUS_FD;
   9063 
   9064 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9065 			tipg_reg |= 0xff;
   9066 			/* Reduce Rx latency in analog PHY */
   9067 			emi_val = 0;
   9068 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9069 		    fdx && speed != STATUS_SPEED_1000) {
   9070 			tipg_reg |= 0xc;
   9071 			emi_val = 1;
   9072 		} else {
   9073 			/* Roll back the default values */
   9074 			tipg_reg |= 0x08;
   9075 			emi_val = 1;
   9076 		}
   9077 
   9078 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9079 
   9080 		rv = sc->phy.acquire(sc);
   9081 		if (rv)
   9082 			return;
   9083 
   9084 		if (sc->sc_type == WM_T_PCH2)
   9085 			emi_addr = I82579_RX_CONFIG;
   9086 		else
   9087 			emi_addr = I217_RX_CONFIG;
   9088 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9089 
   9090 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9091 			uint16_t phy_reg;
   9092 
   9093 			sc->phy.readreg_locked(dev, 2,
   9094 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9095 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9096 			if (speed == STATUS_SPEED_100
   9097 			    || speed == STATUS_SPEED_10)
   9098 				phy_reg |= 0x3e8;
   9099 			else
   9100 				phy_reg |= 0xfa;
   9101 			sc->phy.writereg_locked(dev, 2,
   9102 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9103 
   9104 			if (speed == STATUS_SPEED_1000) {
   9105 				sc->phy.readreg_locked(dev, 2,
   9106 				    HV_PM_CTRL, &phy_reg);
   9107 
   9108 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9109 
   9110 				sc->phy.writereg_locked(dev, 2,
   9111 				    HV_PM_CTRL, phy_reg);
   9112 			}
   9113 		}
   9114 		sc->phy.release(sc);
   9115 
   9116 		if (rv)
   9117 			return;
   9118 
   9119 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9120 			uint16_t data, ptr_gap;
   9121 
   9122 			if (speed == STATUS_SPEED_1000) {
   9123 				rv = sc->phy.acquire(sc);
   9124 				if (rv)
   9125 					return;
   9126 
   9127 				rv = sc->phy.readreg_locked(dev, 2,
   9128 				    I219_UNKNOWN1, &data);
   9129 				if (rv) {
   9130 					sc->phy.release(sc);
   9131 					return;
   9132 				}
   9133 
   9134 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9135 				if (ptr_gap < 0x18) {
   9136 					data &= ~(0x3ff << 2);
   9137 					data |= (0x18 << 2);
   9138 					rv = sc->phy.writereg_locked(dev,
   9139 					    2, I219_UNKNOWN1, data);
   9140 				}
   9141 				sc->phy.release(sc);
   9142 				if (rv)
   9143 					return;
   9144 			} else {
   9145 				rv = sc->phy.acquire(sc);
   9146 				if (rv)
   9147 					return;
   9148 
   9149 				rv = sc->phy.writereg_locked(dev, 2,
   9150 				    I219_UNKNOWN1, 0xc023);
   9151 				sc->phy.release(sc);
   9152 				if (rv)
   9153 					return;
   9154 
   9155 			}
   9156 		}
   9157 	}
   9158 
   9159 	/*
   9160 	 * I217 Packet Loss issue:
    9161 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    9162 	 * on power up.
    9163 	 * Set the Beacon Duration for I217 to 8 usec.
   9164 	 */
   9165 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9166 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9167 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9168 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9169 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9170 	}
   9171 
   9172 	/* Work-around I218 hang issue */
   9173 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9174 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9175 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9176 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9177 		wm_k1_workaround_lpt_lp(sc, link);
   9178 
   9179 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9180 		/*
   9181 		 * Set platform power management values for Latency
   9182 		 * Tolerance Reporting (LTR)
   9183 		 */
   9184 		wm_platform_pm_pch_lpt(sc,
   9185 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9186 	}
   9187 
   9188 	/* Clear link partner's EEE ability */
   9189 	sc->eee_lp_ability = 0;
   9190 
   9191 	/* FEXTNVM6 K1-off workaround */
   9192 	if (sc->sc_type == WM_T_PCH_SPT) {
   9193 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9194 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9195 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9196 		else
   9197 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9198 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9199 	}
   9200 
   9201 	if (!link)
   9202 		return;
   9203 
   9204 	switch (sc->sc_type) {
   9205 	case WM_T_PCH2:
   9206 		wm_k1_workaround_lv(sc);
   9207 		/* FALLTHROUGH */
   9208 	case WM_T_PCH:
   9209 		if (sc->sc_phytype == WMPHY_82578)
   9210 			wm_link_stall_workaround_hv(sc);
   9211 		break;
   9212 	default:
   9213 		break;
   9214 	}
   9215 
   9216 	/* Enable/Disable EEE after link up */
   9217 	if (sc->sc_phytype > WMPHY_82579)
   9218 		wm_set_eee_pchlan(sc);
   9219 }
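
         /*
          * Editor's illustrative sketch (not driver code): the 10Mbps
          * half-duplex workaround above is a plain read-modify-write of the
          * IPG transmit time field in the TIPG register:
          *
          *	uint32_t tipg = CSR_READ(sc, WMREG_TIPG);
          *
          *	tipg &= ~TIPG_IPGT_MASK;	(clear the IPGT field)
          *	tipg |= 0xff;			(max gap at 10M half-duplex)
          *	CSR_WRITE(sc, WMREG_TIPG, tipg);
          *
          * A larger inter-packet gap makes the part less aggressive on a
          * shared segment, trading a little throughput for far fewer
          * collisions.
          */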
   9220 
   9221 /*
   9222  * wm_linkintr_tbi:
   9223  *
   9224  *	Helper; handle link interrupts for TBI mode.
   9225  */
   9226 static void
   9227 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9228 {
   9229 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9230 	uint32_t status;
   9231 
   9232 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9233 		__func__));
   9234 
   9235 	status = CSR_READ(sc, WMREG_STATUS);
   9236 	if (icr & ICR_LSC) {
   9237 		wm_check_for_link(sc);
   9238 		if (status & STATUS_LU) {
   9239 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9240 				device_xname(sc->sc_dev),
   9241 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9242 			/*
   9243 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9244 			 * so we should update sc->sc_ctrl
   9245 			 */
   9246 
   9247 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9248 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9249 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9250 			if (status & STATUS_FD)
   9251 				sc->sc_tctl |=
   9252 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9253 			else
   9254 				sc->sc_tctl |=
   9255 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9256 			if (sc->sc_ctrl & CTRL_TFCE)
   9257 				sc->sc_fcrtl |= FCRTL_XONE;
   9258 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9259 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9260 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9261 			sc->sc_tbi_linkup = 1;
   9262 			if_link_state_change(ifp, LINK_STATE_UP);
   9263 		} else {
   9264 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9265 				device_xname(sc->sc_dev)));
   9266 			sc->sc_tbi_linkup = 0;
   9267 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9268 		}
   9269 		/* Update LED */
   9270 		wm_tbi_serdes_set_linkled(sc);
   9271 	} else if (icr & ICR_RXSEQ)
   9272 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9273 			device_xname(sc->sc_dev)));
   9274 }
   9275 
   9276 /*
   9277  * wm_linkintr_serdes:
   9278  *
    9279  *	Helper; handle link interrupts for SERDES mode.
   9280  */
   9281 static void
   9282 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9283 {
   9284 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9285 	struct mii_data *mii = &sc->sc_mii;
   9286 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9287 	uint32_t pcs_adv, pcs_lpab, reg;
   9288 
   9289 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9290 		__func__));
   9291 
   9292 	if (icr & ICR_LSC) {
   9293 		/* Check PCS */
   9294 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9295 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9296 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9297 				device_xname(sc->sc_dev)));
   9298 			mii->mii_media_status |= IFM_ACTIVE;
   9299 			sc->sc_tbi_linkup = 1;
   9300 			if_link_state_change(ifp, LINK_STATE_UP);
   9301 		} else {
   9302 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9303 				device_xname(sc->sc_dev)));
   9304 			mii->mii_media_status |= IFM_NONE;
   9305 			sc->sc_tbi_linkup = 0;
   9306 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9307 			wm_tbi_serdes_set_linkled(sc);
   9308 			return;
   9309 		}
   9310 		mii->mii_media_active |= IFM_1000_SX;
   9311 		if ((reg & PCS_LSTS_FDX) != 0)
   9312 			mii->mii_media_active |= IFM_FDX;
   9313 		else
   9314 			mii->mii_media_active |= IFM_HDX;
   9315 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9316 			/* Check flow */
   9317 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9318 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9319 				DPRINTF(WM_DEBUG_LINK,
   9320 				    ("XXX LINKOK but not ACOMP\n"));
   9321 				return;
   9322 			}
   9323 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9324 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9325 			DPRINTF(WM_DEBUG_LINK,
   9326 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9327 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9328 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9329 				mii->mii_media_active |= IFM_FLOW
   9330 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9331 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9332 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9333 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9334 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9335 				mii->mii_media_active |= IFM_FLOW
   9336 				    | IFM_ETH_TXPAUSE;
   9337 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9338 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9339 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9340 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9341 				mii->mii_media_active |= IFM_FLOW
   9342 				    | IFM_ETH_RXPAUSE;
   9343 		}
   9344 		/* Update LED */
   9345 		wm_tbi_serdes_set_linkled(sc);
   9346 	} else
   9347 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9348 		    device_xname(sc->sc_dev)));
   9349 }
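
         /*
          * Editor's note: the pause resolution above is the standard IEEE
          * 802.3 (Annex 28B) rule applied to the advertised (PCS_ANADV) and
          * link partner (PCS_LPAB) ability words.  As a truth table over
          * the TXCW_SYM_PAUSE (SYM) and TXCW_ASYM_PAUSE (ASM) bits tested
          * in the code above:
          *
          *	local SYM/ASM	partner SYM/ASM	    resolved flow control
          *	    1   x	    1   x	    TX pause and RX pause
          *	    0   1	    1   1	    TX pause only
          *	    1   1	    0   1	    RX pause only
          *	    (any other combination)	    no pause
          */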
   9350 
   9351 /*
   9352  * wm_linkintr:
   9353  *
   9354  *	Helper; handle link interrupts.
   9355  */
   9356 static void
   9357 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9358 {
   9359 
   9360 	KASSERT(WM_CORE_LOCKED(sc));
   9361 
   9362 	if (sc->sc_flags & WM_F_HAS_MII)
   9363 		wm_linkintr_gmii(sc, icr);
   9364 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9365 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9366 		wm_linkintr_serdes(sc, icr);
   9367 	else
   9368 		wm_linkintr_tbi(sc, icr);
   9369 }
   9370 
   9371 /*
   9372  * wm_intr_legacy:
   9373  *
   9374  *	Interrupt service routine for INTx and MSI.
   9375  */
   9376 static int
   9377 wm_intr_legacy(void *arg)
   9378 {
   9379 	struct wm_softc *sc = arg;
   9380 	struct wm_queue *wmq = &sc->sc_queue[0];
   9381 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9382 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9383 	uint32_t icr, rndval = 0;
   9384 	int handled = 0;
   9385 
   9386 	while (1 /* CONSTCOND */) {
   9387 		icr = CSR_READ(sc, WMREG_ICR);
   9388 		if ((icr & sc->sc_icr) == 0)
   9389 			break;
   9390 		if (handled == 0)
   9391 			DPRINTF(WM_DEBUG_TX,
    9392 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9393 		if (rndval == 0)
   9394 			rndval = icr;
   9395 
   9396 		mutex_enter(rxq->rxq_lock);
   9397 
   9398 		if (rxq->rxq_stopping) {
   9399 			mutex_exit(rxq->rxq_lock);
   9400 			break;
   9401 		}
   9402 
   9403 		handled = 1;
   9404 
   9405 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9406 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9407 			DPRINTF(WM_DEBUG_RX,
   9408 			    ("%s: RX: got Rx intr 0x%08x\n",
   9409 				device_xname(sc->sc_dev),
   9410 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9411 			WM_Q_EVCNT_INCR(rxq, intr);
   9412 		}
   9413 #endif
   9414 		/*
   9415 		 * wm_rxeof() does *not* call upper layer functions directly,
    9416 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9417 		 * So, we can call wm_rxeof() in interrupt context.
   9418 		 */
   9419 		wm_rxeof(rxq, UINT_MAX);
   9420 		/* Fill lower bits with RX index. See below for the upper. */
   9421 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9422 
   9423 		mutex_exit(rxq->rxq_lock);
   9424 		mutex_enter(txq->txq_lock);
   9425 
   9426 		if (txq->txq_stopping) {
   9427 			mutex_exit(txq->txq_lock);
   9428 			break;
   9429 		}
   9430 
   9431 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9432 		if (icr & ICR_TXDW) {
   9433 			DPRINTF(WM_DEBUG_TX,
   9434 			    ("%s: TX: got TXDW interrupt\n",
   9435 				device_xname(sc->sc_dev)));
   9436 			WM_Q_EVCNT_INCR(txq, txdw);
   9437 		}
   9438 #endif
   9439 		wm_txeof(txq, UINT_MAX);
   9440 		/* Fill upper bits with TX index. See above for the lower. */
    9441 		rndval |= txq->txq_next * WM_NRXDESC;
   9442 
   9443 		mutex_exit(txq->txq_lock);
   9444 		WM_CORE_LOCK(sc);
   9445 
   9446 		if (sc->sc_core_stopping) {
   9447 			WM_CORE_UNLOCK(sc);
   9448 			break;
   9449 		}
   9450 
   9451 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9452 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9453 			wm_linkintr(sc, icr);
   9454 		}
   9455 
   9456 		WM_CORE_UNLOCK(sc);
   9457 
   9458 		if (icr & ICR_RXO) {
   9459 #if defined(WM_DEBUG)
   9460 			log(LOG_WARNING, "%s: Receive overrun\n",
   9461 			    device_xname(sc->sc_dev));
   9462 #endif /* defined(WM_DEBUG) */
   9463 		}
   9464 	}
   9465 
   9466 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
   9467 
   9468 	if (handled) {
   9469 		/* Try to get more packets going. */
   9470 		softint_schedule(wmq->wmq_si);
   9471 	}
   9472 
   9473 	return handled;
   9474 }
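
         /*
          * Editor's illustrative sketch (not driver code): the legacy
          * handler loops on ICR because, on these controllers, reading ICR
          * also acknowledges (clears) the bits that were set, so new events
          * may be posted while earlier ones are being serviced.  The shape,
          * with a hypothetical service() helper, is:
          *
          *	for (;;) {
          *		icr = CSR_READ(sc, WMREG_ICR);	(read acks the bits)
          *		if ((icr & sc->sc_icr) == 0)	(nothing of interest)
          *			break;
          *		service(icr);
          *	}
          *
          * Leaving only when no interesting bits remain ensures an event
          * that fires mid-service is still observed on the next iteration.
          */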
   9475 
   9476 static inline void
   9477 wm_txrxintr_disable(struct wm_queue *wmq)
   9478 {
   9479 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9480 
   9481 	if (sc->sc_type == WM_T_82574)
   9482 		CSR_WRITE(sc, WMREG_IMC,
   9483 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9484 	else if (sc->sc_type == WM_T_82575)
   9485 		CSR_WRITE(sc, WMREG_EIMC,
   9486 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9487 	else
   9488 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9489 }
   9490 
   9491 static inline void
   9492 wm_txrxintr_enable(struct wm_queue *wmq)
   9493 {
   9494 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9495 
   9496 	wm_itrs_calculate(sc, wmq);
   9497 
   9498 	/*
    9499 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
    9500 	 * There is no need to care which of RXQ(0) and RXQ(1) enables
    9501 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9502 	 * while its wm_handle_queue(wmq) is running.
   9503 	 */
   9504 	if (sc->sc_type == WM_T_82574)
   9505 		CSR_WRITE(sc, WMREG_IMS,
   9506 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9507 	else if (sc->sc_type == WM_T_82575)
   9508 		CSR_WRITE(sc, WMREG_EIMS,
   9509 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9510 	else
   9511 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9512 }
   9513 
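/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for a queue's Tx/Rx MSI-X vector.  Work
 *	beyond the interrupt-time limits is deferred to wm_handle_queue()
 *	via softint_schedule().
 */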
   9514 static int
   9515 wm_txrxintr_msix(void *arg)
   9516 {
   9517 	struct wm_queue *wmq = arg;
   9518 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9519 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9520 	struct wm_softc *sc = txq->txq_sc;
   9521 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9522 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9523 	uint32_t rndval = 0;
   9524 	bool txmore;
   9525 	bool rxmore;
   9526 
   9527 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9528 
   9529 	DPRINTF(WM_DEBUG_TX,
   9530 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9531 
   9532 	wm_txrxintr_disable(wmq);
   9533 
   9534 	mutex_enter(txq->txq_lock);
   9535 
   9536 	if (txq->txq_stopping) {
   9537 		mutex_exit(txq->txq_lock);
   9538 		return 0;
   9539 	}
   9540 
   9541 	WM_Q_EVCNT_INCR(txq, txdw);
   9542 	txmore = wm_txeof(txq, txlimit);
   9543 	/* Fill upper bits with TX index. See below for the lower. */
   9544 	rndval = txq->txq_next * WM_NRXDESC;
    9545 	/* wm_deferred_start_locked() is called from wm_handle_queue(). */
   9546 	mutex_exit(txq->txq_lock);
   9547 
   9548 	DPRINTF(WM_DEBUG_RX,
   9549 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9550 	mutex_enter(rxq->rxq_lock);
   9551 
   9552 	if (rxq->rxq_stopping) {
   9553 		mutex_exit(rxq->rxq_lock);
   9554 		return 0;
   9555 	}
   9556 
   9557 	WM_Q_EVCNT_INCR(rxq, intr);
   9558 	rxmore = wm_rxeof(rxq, rxlimit);
   9559 
   9560 	/* Fill lower bits with RX index. See above for the upper. */
   9561 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9562 	mutex_exit(rxq->rxq_lock);
   9563 
   9564 	wm_itrs_writereg(sc, wmq);
   9565 
   9566 	/*
    9567 	 * This function is called in hardware interrupt context, and each
    9568 	 * queue's vector is handled on one CPU, so no lock is required.
   9569 	 */
   9570 	if (rndval != 0)
   9571 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
   9572 
   9573 	if (txmore || rxmore)
   9574 		softint_schedule(wmq->wmq_si);
   9575 	else
   9576 		wm_txrxintr_enable(wmq);
   9577 
   9578 	return 1;
   9579 }
   9580 
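/*
 * wm_handle_queue:
 *
 *	Softint handler which continues the Tx/Rx processing that
 *	wm_txrxintr_msix() (or wm_intr_legacy()) could not finish within
 *	its limits; the queue's interrupts are re-enabled only when no
 *	more work remains.
 */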
   9581 static void
   9582 wm_handle_queue(void *arg)
   9583 {
   9584 	struct wm_queue *wmq = arg;
   9585 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9586 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9587 	struct wm_softc *sc = txq->txq_sc;
   9588 	u_int txlimit = sc->sc_tx_process_limit;
   9589 	u_int rxlimit = sc->sc_rx_process_limit;
   9590 	bool txmore;
   9591 	bool rxmore;
   9592 
   9593 	mutex_enter(txq->txq_lock);
   9594 	if (txq->txq_stopping) {
   9595 		mutex_exit(txq->txq_lock);
   9596 		return;
   9597 	}
   9598 	txmore = wm_txeof(txq, txlimit);
   9599 	wm_deferred_start_locked(txq);
   9600 	mutex_exit(txq->txq_lock);
   9601 
   9602 	mutex_enter(rxq->rxq_lock);
   9603 	if (rxq->rxq_stopping) {
   9604 		mutex_exit(rxq->rxq_lock);
   9605 		return;
   9606 	}
   9607 	WM_Q_EVCNT_INCR(rxq, defer);
   9608 	rxmore = wm_rxeof(rxq, rxlimit);
   9609 	mutex_exit(rxq->rxq_lock);
   9610 
   9611 	if (txmore || rxmore)
   9612 		softint_schedule(wmq->wmq_si);
   9613 	else
   9614 		wm_txrxintr_enable(wmq);
   9615 }
   9616 
   9617 /*
   9618  * wm_linkintr_msix:
   9619  *
   9620  *	Interrupt service routine for link status change for MSI-X.
   9621  */
   9622 static int
   9623 wm_linkintr_msix(void *arg)
   9624 {
   9625 	struct wm_softc *sc = arg;
   9626 	uint32_t reg;
    9627 	bool has_rxo = false;
   9628 
   9629 	DPRINTF(WM_DEBUG_LINK,
   9630 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9631 
   9632 	reg = CSR_READ(sc, WMREG_ICR);
   9633 	WM_CORE_LOCK(sc);
   9634 	if (sc->sc_core_stopping)
   9635 		goto out;
   9636 
   9637 	if ((reg & ICR_LSC) != 0) {
   9638 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9639 		wm_linkintr(sc, ICR_LSC);
   9640 	}
   9641 
   9642 	/*
   9643 	 * XXX 82574 MSI-X mode workaround
   9644 	 *
    9645 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    9646 	 * MSI-X vector, and it raises neither the ICR_RXQ(0) nor the
    9647 	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
   9648 	 * interrupts by writing WMREG_ICS to process receive packets.
   9649 	 */
   9650 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9651 #if defined(WM_DEBUG)
   9652 		log(LOG_WARNING, "%s: Receive overrun\n",
   9653 		    device_xname(sc->sc_dev));
   9654 #endif /* defined(WM_DEBUG) */
   9655 
   9656 		has_rxo = true;
   9657 		/*
    9658 		 * The RXO interrupt rate is very high when receive traffic is
    9659 		 * heavy, so we handle ICR_OTHER in polling mode as we do for the
    9660 		 * Tx/Rx interrupts. ICR_OTHER will be re-enabled at the end of
   9661 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   9662 		 * ICR_RXQ(1) interrupts.
   9663 		 */
   9664 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9665 
   9666 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9667 	}
   9668 
   9671 out:
   9672 	WM_CORE_UNLOCK(sc);
   9673 
   9674 	if (sc->sc_type == WM_T_82574) {
   9675 		if (!has_rxo)
   9676 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9677 		else
   9678 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9679 	} else if (sc->sc_type == WM_T_82575)
   9680 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9681 	else
   9682 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9683 
   9684 	return 1;
   9685 }
   9686 
   9687 /*
   9688  * Media related.
   9689  * GMII, SGMII, TBI (and SERDES)
   9690  */
   9691 
   9692 /* Common */
   9693 
   9694 /*
   9695  * wm_tbi_serdes_set_linkled:
   9696  *
   9697  *	Update the link LED on TBI and SERDES devices.
   9698  */
   9699 static void
   9700 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9701 {
   9702 
   9703 	if (sc->sc_tbi_linkup)
   9704 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9705 	else
   9706 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9707 
   9708 	/* 82540 or newer devices are active low */
   9709 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9710 
   9711 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9712 }
   9713 
   9714 /* GMII related */
   9715 
   9716 /*
   9717  * wm_gmii_reset:
   9718  *
   9719  *	Reset the PHY.
   9720  */
   9721 static void
   9722 wm_gmii_reset(struct wm_softc *sc)
   9723 {
   9724 	uint32_t reg;
   9725 	int rv;
   9726 
   9727 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9728 		device_xname(sc->sc_dev), __func__));
   9729 
   9730 	rv = sc->phy.acquire(sc);
   9731 	if (rv != 0) {
   9732 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9733 		    __func__);
   9734 		return;
   9735 	}
   9736 
   9737 	switch (sc->sc_type) {
   9738 	case WM_T_82542_2_0:
   9739 	case WM_T_82542_2_1:
   9740 		/* null */
   9741 		break;
   9742 	case WM_T_82543:
   9743 		/*
    9744 		 * With the 82543, we need to force the MAC's speed and duplex
    9745 		 * to match the PHY's speed and duplex configuration.
    9746 		 * In addition, we need to toggle the PHY's hardware reset pin
    9747 		 * to take the PHY out of reset.
   9748 		 */
   9749 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9750 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9751 
   9752 		/* The PHY reset pin is active-low. */
   9753 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9754 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9755 		    CTRL_EXT_SWDPIN(4));
   9756 		reg |= CTRL_EXT_SWDPIO(4);
   9757 
   9758 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9759 		CSR_WRITE_FLUSH(sc);
   9760 		delay(10*1000);
   9761 
   9762 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9763 		CSR_WRITE_FLUSH(sc);
   9764 		delay(150);
   9765 #if 0
   9766 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9767 #endif
   9768 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9769 		break;
   9770 	case WM_T_82544:	/* Reset 10000us */
   9771 	case WM_T_82540:
   9772 	case WM_T_82545:
   9773 	case WM_T_82545_3:
   9774 	case WM_T_82546:
   9775 	case WM_T_82546_3:
   9776 	case WM_T_82541:
   9777 	case WM_T_82541_2:
   9778 	case WM_T_82547:
   9779 	case WM_T_82547_2:
   9780 	case WM_T_82571:	/* Reset 100us */
   9781 	case WM_T_82572:
   9782 	case WM_T_82573:
   9783 	case WM_T_82574:
   9784 	case WM_T_82575:
   9785 	case WM_T_82576:
   9786 	case WM_T_82580:
   9787 	case WM_T_I350:
   9788 	case WM_T_I354:
   9789 	case WM_T_I210:
   9790 	case WM_T_I211:
   9791 	case WM_T_82583:
   9792 	case WM_T_80003:
   9793 		/* Generic reset */
   9794 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9795 		CSR_WRITE_FLUSH(sc);
   9796 		delay(20000);
   9797 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9798 		CSR_WRITE_FLUSH(sc);
   9799 		delay(20000);
   9800 
   9801 		if ((sc->sc_type == WM_T_82541)
   9802 		    || (sc->sc_type == WM_T_82541_2)
   9803 		    || (sc->sc_type == WM_T_82547)
   9804 		    || (sc->sc_type == WM_T_82547_2)) {
    9805 			/* Workarounds for IGP are done in igp_reset() */
   9806 			/* XXX add code to set LED after phy reset */
   9807 		}
   9808 		break;
   9809 	case WM_T_ICH8:
   9810 	case WM_T_ICH9:
   9811 	case WM_T_ICH10:
   9812 	case WM_T_PCH:
   9813 	case WM_T_PCH2:
   9814 	case WM_T_PCH_LPT:
   9815 	case WM_T_PCH_SPT:
   9816 	case WM_T_PCH_CNP:
   9817 		/* Generic reset */
   9818 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9819 		CSR_WRITE_FLUSH(sc);
   9820 		delay(100);
   9821 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9822 		CSR_WRITE_FLUSH(sc);
   9823 		delay(150);
   9824 		break;
   9825 	default:
   9826 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9827 		    __func__);
   9828 		break;
   9829 	}
   9830 
   9831 	sc->phy.release(sc);
   9832 
   9833 	/* get_cfg_done */
   9834 	wm_get_cfg_done(sc);
   9835 
   9836 	/* Extra setup */
   9837 	switch (sc->sc_type) {
   9838 	case WM_T_82542_2_0:
   9839 	case WM_T_82542_2_1:
   9840 	case WM_T_82543:
   9841 	case WM_T_82544:
   9842 	case WM_T_82540:
   9843 	case WM_T_82545:
   9844 	case WM_T_82545_3:
   9845 	case WM_T_82546:
   9846 	case WM_T_82546_3:
   9847 	case WM_T_82541_2:
   9848 	case WM_T_82547_2:
   9849 	case WM_T_82571:
   9850 	case WM_T_82572:
   9851 	case WM_T_82573:
   9852 	case WM_T_82574:
   9853 	case WM_T_82583:
   9854 	case WM_T_82575:
   9855 	case WM_T_82576:
   9856 	case WM_T_82580:
   9857 	case WM_T_I350:
   9858 	case WM_T_I354:
   9859 	case WM_T_I210:
   9860 	case WM_T_I211:
   9861 	case WM_T_80003:
   9862 		/* Null */
   9863 		break;
   9864 	case WM_T_82541:
   9865 	case WM_T_82547:
    9866 		/* XXX Actively configure the LED after PHY reset */
   9867 		break;
   9868 	case WM_T_ICH8:
   9869 	case WM_T_ICH9:
   9870 	case WM_T_ICH10:
   9871 	case WM_T_PCH:
   9872 	case WM_T_PCH2:
   9873 	case WM_T_PCH_LPT:
   9874 	case WM_T_PCH_SPT:
   9875 	case WM_T_PCH_CNP:
   9876 		wm_phy_post_reset(sc);
   9877 		break;
   9878 	default:
   9879 		panic("%s: unknown type\n", __func__);
   9880 		break;
   9881 	}
   9882 }
   9883 
   9884 /*
    9885  * Set up sc_phytype and mii_{read|write}reg.
    9886  *
    9887  *  To identify the PHY type, the correct read/write functions must be
    9888  * selected. To select the correct functions, the PCI ID or the MAC type
    9889  * is required, without accessing PHY registers.
    9890  *
    9891  *  On the first call of this function, the PHY ID is not known yet, so
    9892  * check the PCI ID or the MAC type. The list of PCI IDs may not be
    9893  * complete, so the result might be incorrect.
    9894  *
    9895  *  On the second call, the PHY OUI and model are used to identify the
    9896  * PHY type. It might not be perfect because of missing comparison
    9897  * entries, but it should be better than the first call's result.
    9898  *
    9899  *  If the newly detected result differs from the previous assumption,
    9900  * a diagnostic message is printed.
   9901  */
   9902 static void
   9903 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9904     uint16_t phy_model)
   9905 {
   9906 	device_t dev = sc->sc_dev;
   9907 	struct mii_data *mii = &sc->sc_mii;
   9908 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9909 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9910 	mii_readreg_t new_readreg;
   9911 	mii_writereg_t new_writereg;
   9912 
   9913 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9914 		device_xname(sc->sc_dev), __func__));
   9915 
   9916 	if (mii->mii_readreg == NULL) {
   9917 		/*
   9918 		 *  This is the first call of this function. For ICH and PCH
   9919 		 * variants, it's difficult to determine the PHY access method
   9920 		 * by sc_type, so use the PCI product ID for some devices.
   9921 		 */
   9922 
   9923 		switch (sc->sc_pcidevid) {
   9924 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9925 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9926 			/* 82577 */
   9927 			new_phytype = WMPHY_82577;
   9928 			break;
   9929 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9930 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9931 			/* 82578 */
   9932 			new_phytype = WMPHY_82578;
   9933 			break;
   9934 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9935 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9936 			/* 82579 */
   9937 			new_phytype = WMPHY_82579;
   9938 			break;
   9939 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9940 		case PCI_PRODUCT_INTEL_82801I_BM:
   9941 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9942 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9943 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9944 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9945 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9946 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9947 			/* ICH8, 9, 10 with 82567 */
   9948 			new_phytype = WMPHY_BM;
   9949 			break;
   9950 		default:
   9951 			break;
   9952 		}
   9953 	} else {
   9954 		/* It's not the first call. Use PHY OUI and model */
   9955 		switch (phy_oui) {
   9956 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9957 			switch (phy_model) {
   9958 			case 0x0004: /* XXX */
   9959 				new_phytype = WMPHY_82578;
   9960 				break;
   9961 			default:
   9962 				break;
   9963 			}
   9964 			break;
   9965 		case MII_OUI_xxMARVELL:
   9966 			switch (phy_model) {
   9967 			case MII_MODEL_xxMARVELL_I210:
   9968 				new_phytype = WMPHY_I210;
   9969 				break;
   9970 			case MII_MODEL_xxMARVELL_E1011:
   9971 			case MII_MODEL_xxMARVELL_E1000_3:
   9972 			case MII_MODEL_xxMARVELL_E1000_5:
   9973 			case MII_MODEL_xxMARVELL_E1112:
   9974 				new_phytype = WMPHY_M88;
   9975 				break;
   9976 			case MII_MODEL_xxMARVELL_E1149:
   9977 				new_phytype = WMPHY_BM;
   9978 				break;
   9979 			case MII_MODEL_xxMARVELL_E1111:
   9980 			case MII_MODEL_xxMARVELL_I347:
   9981 			case MII_MODEL_xxMARVELL_E1512:
   9982 			case MII_MODEL_xxMARVELL_E1340M:
   9983 			case MII_MODEL_xxMARVELL_E1543:
   9984 				new_phytype = WMPHY_M88;
   9985 				break;
   9986 			case MII_MODEL_xxMARVELL_I82563:
   9987 				new_phytype = WMPHY_GG82563;
   9988 				break;
   9989 			default:
   9990 				break;
   9991 			}
   9992 			break;
   9993 		case MII_OUI_INTEL:
   9994 			switch (phy_model) {
   9995 			case MII_MODEL_INTEL_I82577:
   9996 				new_phytype = WMPHY_82577;
   9997 				break;
   9998 			case MII_MODEL_INTEL_I82579:
   9999 				new_phytype = WMPHY_82579;
   10000 				break;
   10001 			case MII_MODEL_INTEL_I217:
   10002 				new_phytype = WMPHY_I217;
   10003 				break;
   10004 			case MII_MODEL_INTEL_I82580:
   10005 			case MII_MODEL_INTEL_I350:
   10006 				new_phytype = WMPHY_82580;
   10007 				break;
   10008 			default:
   10009 				break;
   10010 			}
   10011 			break;
   10012 		case MII_OUI_yyINTEL:
   10013 			switch (phy_model) {
   10014 			case MII_MODEL_yyINTEL_I82562G:
   10015 			case MII_MODEL_yyINTEL_I82562EM:
   10016 			case MII_MODEL_yyINTEL_I82562ET:
   10017 				new_phytype = WMPHY_IFE;
   10018 				break;
   10019 			case MII_MODEL_yyINTEL_IGP01E1000:
   10020 				new_phytype = WMPHY_IGP;
   10021 				break;
   10022 			case MII_MODEL_yyINTEL_I82566:
   10023 				new_phytype = WMPHY_IGP_3;
   10024 				break;
   10025 			default:
   10026 				break;
   10027 			}
   10028 			break;
   10029 		default:
   10030 			break;
   10031 		}
   10032 		if (new_phytype == WMPHY_UNKNOWN)
   10033 			aprint_verbose_dev(dev,
   10034 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10035 			    __func__, phy_oui, phy_model);
   10036 
   10037 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10038 		    && (sc->sc_phytype != new_phytype)) {
    10039 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    10040 			    "was incorrect. PHY type from PHY ID = %u\n",
   10041 			    sc->sc_phytype, new_phytype);
   10042 		}
   10043 	}
   10044 
   10045 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10046 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10047 		/* SGMII */
   10048 		new_readreg = wm_sgmii_readreg;
   10049 		new_writereg = wm_sgmii_writereg;
   10050 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10051 		/* BM2 (phyaddr == 1) */
   10052 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10053 		    && (new_phytype != WMPHY_BM)
   10054 		    && (new_phytype != WMPHY_UNKNOWN))
   10055 			doubt_phytype = new_phytype;
   10056 		new_phytype = WMPHY_BM;
   10057 		new_readreg = wm_gmii_bm_readreg;
   10058 		new_writereg = wm_gmii_bm_writereg;
   10059 	} else if (sc->sc_type >= WM_T_PCH) {
   10060 		/* All PCH* use _hv_ */
   10061 		new_readreg = wm_gmii_hv_readreg;
   10062 		new_writereg = wm_gmii_hv_writereg;
   10063 	} else if (sc->sc_type >= WM_T_ICH8) {
   10064 		/* non-82567 ICH8, 9 and 10 */
   10065 		new_readreg = wm_gmii_i82544_readreg;
   10066 		new_writereg = wm_gmii_i82544_writereg;
   10067 	} else if (sc->sc_type >= WM_T_80003) {
   10068 		/* 80003 */
   10069 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10070 		    && (new_phytype != WMPHY_GG82563)
   10071 		    && (new_phytype != WMPHY_UNKNOWN))
   10072 			doubt_phytype = new_phytype;
   10073 		new_phytype = WMPHY_GG82563;
   10074 		new_readreg = wm_gmii_i80003_readreg;
   10075 		new_writereg = wm_gmii_i80003_writereg;
   10076 	} else if (sc->sc_type >= WM_T_I210) {
   10077 		/* I210 and I211 */
   10078 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10079 		    && (new_phytype != WMPHY_I210)
   10080 		    && (new_phytype != WMPHY_UNKNOWN))
   10081 			doubt_phytype = new_phytype;
   10082 		new_phytype = WMPHY_I210;
   10083 		new_readreg = wm_gmii_gs40g_readreg;
   10084 		new_writereg = wm_gmii_gs40g_writereg;
   10085 	} else if (sc->sc_type >= WM_T_82580) {
   10086 		/* 82580, I350 and I354 */
   10087 		new_readreg = wm_gmii_82580_readreg;
   10088 		new_writereg = wm_gmii_82580_writereg;
   10089 	} else if (sc->sc_type >= WM_T_82544) {
    10090 		/* 82544, 82540, 8254[1567], 8257[1234] and 82583 */
   10091 		new_readreg = wm_gmii_i82544_readreg;
   10092 		new_writereg = wm_gmii_i82544_writereg;
   10093 	} else {
   10094 		new_readreg = wm_gmii_i82543_readreg;
   10095 		new_writereg = wm_gmii_i82543_writereg;
   10096 	}
   10097 
   10098 	if (new_phytype == WMPHY_BM) {
   10099 		/* All BM use _bm_ */
   10100 		new_readreg = wm_gmii_bm_readreg;
   10101 		new_writereg = wm_gmii_bm_writereg;
   10102 	}
   10103 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10104 		/* All PCH* use _hv_ */
   10105 		new_readreg = wm_gmii_hv_readreg;
   10106 		new_writereg = wm_gmii_hv_writereg;
   10107 	}
   10108 
   10109 	/* Diag output */
   10110 	if (doubt_phytype != WMPHY_UNKNOWN)
   10111 		aprint_error_dev(dev, "Assumed new PHY type was "
   10112 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10113 		    new_phytype);
   10114 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10115 	    && (sc->sc_phytype != new_phytype))
    10116 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    10117 		    "was incorrect. New PHY type = %u\n",
   10118 		    sc->sc_phytype, new_phytype);
   10119 
   10120 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10121 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10122 
   10123 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10124 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10125 		    "function was incorrect.\n");
   10126 
   10127 	/* Update now */
   10128 	sc->sc_phytype = new_phytype;
   10129 	mii->mii_readreg = new_readreg;
   10130 	mii->mii_writereg = new_writereg;
   10131 	if (new_readreg == wm_gmii_hv_readreg) {
   10132 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10133 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10134 	} else if (new_readreg == wm_sgmii_readreg) {
   10135 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10136 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10137 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10138 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10139 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10140 	}
   10141 }
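
/*
 * Illustrative sketch (not compiled): the two-call pattern described
 * above.  The arguments of the first call are an assumption here; the
 * second call mirrors what wm_gmii_mediainit() does once a PHY has
 * been probed.
 */
#if 0
	/* First call: no PHY probed yet; guess from PCI ID / MAC type. */
	wm_gmii_setup_phytype(sc, 0, 0);

	/* ... mii_attach() probes the PHY ... */

	/* Second call: refine using the probed PHY's OUI and model. */
	child = LIST_FIRST(&mii->mii_phys);
	if (child != NULL)
		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
		    child->mii_mpd_model);
#endif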
   10142 
   10143 /*
   10144  * wm_get_phy_id_82575:
   10145  *
    10146  *	Return the PHY ID, or -1 on failure.
   10147  */
   10148 static int
   10149 wm_get_phy_id_82575(struct wm_softc *sc)
   10150 {
   10151 	uint32_t reg;
   10152 	int phyid = -1;
   10153 
   10154 	/* XXX */
   10155 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10156 		return -1;
   10157 
   10158 	if (wm_sgmii_uses_mdio(sc)) {
   10159 		switch (sc->sc_type) {
   10160 		case WM_T_82575:
   10161 		case WM_T_82576:
   10162 			reg = CSR_READ(sc, WMREG_MDIC);
   10163 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10164 			break;
   10165 		case WM_T_82580:
   10166 		case WM_T_I350:
   10167 		case WM_T_I354:
   10168 		case WM_T_I210:
   10169 		case WM_T_I211:
   10170 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10171 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10172 			break;
   10173 		default:
   10174 			return -1;
   10175 		}
   10176 	}
   10177 
   10178 	return phyid;
   10179 }
   10180 
   10181 
   10182 /*
   10183  * wm_gmii_mediainit:
   10184  *
   10185  *	Initialize media for use on 1000BASE-T devices.
   10186  */
   10187 static void
   10188 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10189 {
   10190 	device_t dev = sc->sc_dev;
   10191 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10192 	struct mii_data *mii = &sc->sc_mii;
   10193 	uint32_t reg;
   10194 
   10195 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10196 		device_xname(sc->sc_dev), __func__));
   10197 
   10198 	/* We have GMII. */
   10199 	sc->sc_flags |= WM_F_HAS_MII;
   10200 
   10201 	if (sc->sc_type == WM_T_80003)
    10202 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10203 	else
   10204 		sc->sc_tipg = TIPG_1000T_DFLT;
   10205 
   10206 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10207 	if ((sc->sc_type == WM_T_82580)
   10208 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10209 	    || (sc->sc_type == WM_T_I211)) {
   10210 		reg = CSR_READ(sc, WMREG_PHPM);
   10211 		reg &= ~PHPM_GO_LINK_D;
   10212 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10213 	}
   10214 
   10215 	/*
   10216 	 * Let the chip set speed/duplex on its own based on
   10217 	 * signals from the PHY.
   10218 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10219 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10220 	 */
   10221 	sc->sc_ctrl |= CTRL_SLU;
   10222 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10223 
   10224 	/* Initialize our media structures and probe the GMII. */
   10225 	mii->mii_ifp = ifp;
   10226 
   10227 	mii->mii_statchg = wm_gmii_statchg;
   10228 
   10229 	/* get PHY control from SMBus to PCIe */
   10230 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10231 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10232 	    || (sc->sc_type == WM_T_PCH_CNP))
   10233 		wm_init_phy_workarounds_pchlan(sc);
   10234 
   10235 	wm_gmii_reset(sc);
   10236 
   10237 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10238 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10239 	    wm_gmii_mediastatus);
   10240 
   10241 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10242 	    || (sc->sc_type == WM_T_82580)
   10243 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10244 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10245 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10246 			/* Attach only one port */
   10247 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10248 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10249 		} else {
   10250 			int i, id;
   10251 			uint32_t ctrl_ext;
   10252 
   10253 			id = wm_get_phy_id_82575(sc);
   10254 			if (id != -1) {
   10255 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10256 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10257 			}
   10258 			if ((id == -1)
   10259 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10260 				/* Power on sgmii phy if it is disabled */
   10261 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10262 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10263 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10264 				CSR_WRITE_FLUSH(sc);
   10265 				delay(300*1000); /* XXX too long */
   10266 
    10267 				/* Probe PHY addresses 1 through 7 */
   10268 				for (i = 1; i < 8; i++)
   10269 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10270 					    0xffffffff, i, MII_OFFSET_ANY,
   10271 					    MIIF_DOPAUSE);
   10272 
   10273 				/* Restore previous sfp cage power state */
   10274 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10275 			}
   10276 		}
   10277 	} else
   10278 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10279 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10280 
   10281 	/*
   10282 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10283 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10284 	 */
   10285 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10286 		|| (sc->sc_type == WM_T_PCH_SPT)
   10287 		|| (sc->sc_type == WM_T_PCH_CNP))
   10288 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10289 		wm_set_mdio_slow_mode_hv(sc);
   10290 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10291 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10292 	}
   10293 
   10294 	/*
   10295 	 * (For ICH8 variants)
   10296 	 * If PHY detection failed, use BM's r/w function and retry.
   10297 	 */
   10298 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10299 		/* if failed, retry with *_bm_* */
   10300 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10301 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10302 		    sc->sc_phytype);
   10303 		sc->sc_phytype = WMPHY_BM;
   10304 		mii->mii_readreg = wm_gmii_bm_readreg;
   10305 		mii->mii_writereg = wm_gmii_bm_writereg;
   10306 
   10307 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10308 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10309 	}
   10310 
   10311 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10312 		/* No PHY was found */
   10313 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10314 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10315 		sc->sc_phytype = WMPHY_NONE;
   10316 	} else {
   10317 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10318 
   10319 		/*
   10320 		 * PHY Found! Check PHY type again by the second call of
   10321 		 * wm_gmii_setup_phytype.
   10322 		 */
   10323 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10324 		    child->mii_mpd_model);
   10325 
   10326 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10327 	}
   10328 }
   10329 
   10330 /*
   10331  * wm_gmii_mediachange:	[ifmedia interface function]
   10332  *
   10333  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10334  */
   10335 static int
   10336 wm_gmii_mediachange(struct ifnet *ifp)
   10337 {
   10338 	struct wm_softc *sc = ifp->if_softc;
   10339 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10340 	int rc;
   10341 
   10342 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10343 		device_xname(sc->sc_dev), __func__));
   10344 	if ((ifp->if_flags & IFF_UP) == 0)
   10345 		return 0;
   10346 
   10347 	/* Disable D0 LPLU. */
   10348 	wm_lplu_d0_disable(sc);
   10349 
   10350 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10351 	sc->sc_ctrl |= CTRL_SLU;
   10352 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10353 	    || (sc->sc_type > WM_T_82543)) {
   10354 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10355 	} else {
   10356 		sc->sc_ctrl &= ~CTRL_ASDE;
   10357 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10358 		if (ife->ifm_media & IFM_FDX)
   10359 			sc->sc_ctrl |= CTRL_FD;
   10360 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10361 		case IFM_10_T:
   10362 			sc->sc_ctrl |= CTRL_SPEED_10;
   10363 			break;
   10364 		case IFM_100_TX:
   10365 			sc->sc_ctrl |= CTRL_SPEED_100;
   10366 			break;
   10367 		case IFM_1000_T:
   10368 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10369 			break;
   10370 		case IFM_NONE:
   10371 			/* There is no specific setting for IFM_NONE */
   10372 			break;
   10373 		default:
   10374 			panic("wm_gmii_mediachange: bad media 0x%x",
   10375 			    ife->ifm_media);
   10376 		}
   10377 	}
   10378 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10379 	CSR_WRITE_FLUSH(sc);
   10380 	if (sc->sc_type <= WM_T_82543)
   10381 		wm_gmii_reset(sc);
   10382 
   10383 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10384 		return 0;
   10385 	return rc;
   10386 }
   10387 
   10388 /*
   10389  * wm_gmii_mediastatus:	[ifmedia interface function]
   10390  *
   10391  *	Get the current interface media status on a 1000BASE-T device.
   10392  */
   10393 static void
   10394 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10395 {
   10396 	struct wm_softc *sc = ifp->if_softc;
   10397 
   10398 	ether_mediastatus(ifp, ifmr);
   10399 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10400 	    | sc->sc_flowflags;
   10401 }
   10402 
   10403 #define	MDI_IO		CTRL_SWDPIN(2)
   10404 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10405 #define	MDI_CLK		CTRL_SWDPIN(3)
   10406 
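/*
 * The helpers below bit-bang IEEE 802.3 clause 22 MDIO frames on the
 * 82543's software-controllable pins.  A read, as issued by
 * wm_gmii_i82543_readreg() below, sends a 32-bit preamble of ones and
 * then 14 bits (<ST=01> <OP=10> <PHYAD[4:0]> <REGAD[4:0]>), after which
 * wm_i82543_mii_recvbits() clocks in the turnaround and 16 data bits.
 * A write sends the whole 32-bit frame (ST, OP=01, PHYAD, REGAD, TA,
 * DATA) through wm_i82543_mii_sendbits() alone.
 */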
   10407 static void
   10408 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10409 {
   10410 	uint32_t i, v;
   10411 
   10412 	v = CSR_READ(sc, WMREG_CTRL);
   10413 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10414 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10415 
   10416 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10417 		if (data & i)
   10418 			v |= MDI_IO;
   10419 		else
   10420 			v &= ~MDI_IO;
   10421 		CSR_WRITE(sc, WMREG_CTRL, v);
   10422 		CSR_WRITE_FLUSH(sc);
   10423 		delay(10);
   10424 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10425 		CSR_WRITE_FLUSH(sc);
   10426 		delay(10);
   10427 		CSR_WRITE(sc, WMREG_CTRL, v);
   10428 		CSR_WRITE_FLUSH(sc);
   10429 		delay(10);
   10430 	}
   10431 }
   10432 
   10433 static uint16_t
   10434 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10435 {
   10436 	uint32_t v, i;
   10437 	uint16_t data = 0;
   10438 
   10439 	v = CSR_READ(sc, WMREG_CTRL);
   10440 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10441 	v |= CTRL_SWDPIO(3);
   10442 
   10443 	CSR_WRITE(sc, WMREG_CTRL, v);
   10444 	CSR_WRITE_FLUSH(sc);
   10445 	delay(10);
   10446 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10447 	CSR_WRITE_FLUSH(sc);
   10448 	delay(10);
   10449 	CSR_WRITE(sc, WMREG_CTRL, v);
   10450 	CSR_WRITE_FLUSH(sc);
   10451 	delay(10);
   10452 
   10453 	for (i = 0; i < 16; i++) {
   10454 		data <<= 1;
   10455 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10456 		CSR_WRITE_FLUSH(sc);
   10457 		delay(10);
   10458 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10459 			data |= 1;
   10460 		CSR_WRITE(sc, WMREG_CTRL, v);
   10461 		CSR_WRITE_FLUSH(sc);
   10462 		delay(10);
   10463 	}
   10464 
   10465 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10466 	CSR_WRITE_FLUSH(sc);
   10467 	delay(10);
   10468 	CSR_WRITE(sc, WMREG_CTRL, v);
   10469 	CSR_WRITE_FLUSH(sc);
   10470 	delay(10);
   10471 
   10472 	return data;
   10473 }
   10474 
   10475 #undef MDI_IO
   10476 #undef MDI_DIR
   10477 #undef MDI_CLK
   10478 
   10479 /*
   10480  * wm_gmii_i82543_readreg:	[mii interface function]
   10481  *
   10482  *	Read a PHY register on the GMII (i82543 version).
   10483  */
   10484 static int
   10485 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10486 {
   10487 	struct wm_softc *sc = device_private(dev);
   10488 
   10489 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10490 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10491 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10492 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10493 
   10494 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10495 		device_xname(dev), phy, reg, *val));
   10496 
   10497 	return 0;
   10498 }
   10499 
   10500 /*
   10501  * wm_gmii_i82543_writereg:	[mii interface function]
   10502  *
   10503  *	Write a PHY register on the GMII (i82543 version).
   10504  */
   10505 static int
   10506 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10507 {
   10508 	struct wm_softc *sc = device_private(dev);
   10509 
   10510 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10511 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10512 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10513 	    (MII_COMMAND_START << 30), 32);
   10514 
   10515 	return 0;
   10516 }
   10517 
   10518 /*
   10519  * wm_gmii_mdic_readreg:	[mii interface function]
   10520  *
   10521  *	Read a PHY register on the GMII.
   10522  */
   10523 static int
   10524 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10525 {
   10526 	struct wm_softc *sc = device_private(dev);
   10527 	uint32_t mdic = 0;
   10528 	int i;
   10529 
   10530 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10531 	    && (reg > MII_ADDRMASK)) {
   10532 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10533 		    __func__, sc->sc_phytype, reg);
   10534 		reg &= MII_ADDRMASK;
   10535 	}
   10536 
   10537 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10538 	    MDIC_REGADD(reg));
   10539 
   10540 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10541 		delay(50);
   10542 		mdic = CSR_READ(sc, WMREG_MDIC);
   10543 		if (mdic & MDIC_READY)
   10544 			break;
   10545 	}
   10546 
   10547 	if ((mdic & MDIC_READY) == 0) {
   10548 		DPRINTF(WM_DEBUG_GMII,
   10549 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10550 			device_xname(dev), phy, reg));
   10551 		return ETIMEDOUT;
   10552 	} else if (mdic & MDIC_E) {
   10553 		/* This is normal if no PHY is present. */
   10554 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10555 			device_xname(sc->sc_dev), phy, reg));
   10556 		return -1;
   10557 	} else
   10558 		*val = MDIC_DATA(mdic);
   10559 
   10560 	/*
   10561 	 * Allow some time after each MDIC transaction to avoid
   10562 	 * reading duplicate data in the next MDIC transaction.
   10563 	 */
   10564 	if (sc->sc_type == WM_T_PCH2)
   10565 		delay(100);
   10566 
   10567 	return 0;
   10568 }
   10569 
   10570 /*
   10571  * wm_gmii_mdic_writereg:	[mii interface function]
   10572  *
   10573  *	Write a PHY register on the GMII.
   10574  */
   10575 static int
   10576 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10577 {
   10578 	struct wm_softc *sc = device_private(dev);
   10579 	uint32_t mdic = 0;
   10580 	int i;
   10581 
   10582 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10583 	    && (reg > MII_ADDRMASK)) {
   10584 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10585 		    __func__, sc->sc_phytype, reg);
   10586 		reg &= MII_ADDRMASK;
   10587 	}
   10588 
   10589 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10590 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10591 
   10592 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10593 		delay(50);
   10594 		mdic = CSR_READ(sc, WMREG_MDIC);
   10595 		if (mdic & MDIC_READY)
   10596 			break;
   10597 	}
   10598 
   10599 	if ((mdic & MDIC_READY) == 0) {
   10600 		DPRINTF(WM_DEBUG_GMII,
   10601 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10602 			device_xname(dev), phy, reg));
   10603 		return ETIMEDOUT;
   10604 	} else if (mdic & MDIC_E) {
   10605 		DPRINTF(WM_DEBUG_GMII,
   10606 		    ("%s: MDIC write error: phy %d reg %d\n",
   10607 			device_xname(dev), phy, reg));
   10608 		return -1;
   10609 	}
   10610 
   10611 	/*
   10612 	 * Allow some time after each MDIC transaction to avoid
   10613 	 * reading duplicate data in the next MDIC transaction.
   10614 	 */
   10615 	if (sc->sc_type == WM_T_PCH2)
   10616 		delay(100);
   10617 
   10618 	return 0;
   10619 }
   10620 
   10621 /*
   10622  * wm_gmii_i82544_readreg:	[mii interface function]
   10623  *
   10624  *	Read a PHY register on the GMII.
   10625  */
   10626 static int
   10627 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10628 {
   10629 	struct wm_softc *sc = device_private(dev);
   10630 	int rv;
   10631 
   10632 	if (sc->phy.acquire(sc)) {
   10633 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10634 		return -1;
   10635 	}
   10636 
   10637 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10638 
   10639 	sc->phy.release(sc);
   10640 
   10641 	return rv;
   10642 }
   10643 
   10644 static int
   10645 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10646 {
   10647 	struct wm_softc *sc = device_private(dev);
   10648 	int rv;
   10649 
   10650 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10651 		switch (sc->sc_phytype) {
   10652 		case WMPHY_IGP:
   10653 		case WMPHY_IGP_2:
   10654 		case WMPHY_IGP_3:
   10655 			rv = wm_gmii_mdic_writereg(dev, phy,
   10656 			    MII_IGPHY_PAGE_SELECT, reg);
   10657 			if (rv != 0)
   10658 				return rv;
   10659 			break;
   10660 		default:
   10661 #ifdef WM_DEBUG
   10662 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10663 			    __func__, sc->sc_phytype, reg);
   10664 #endif
   10665 			break;
   10666 		}
   10667 	}
   10668 
   10669 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10670 }
   10671 
   10672 /*
   10673  * wm_gmii_i82544_writereg:	[mii interface function]
   10674  *
   10675  *	Write a PHY register on the GMII.
   10676  */
   10677 static int
   10678 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10679 {
   10680 	struct wm_softc *sc = device_private(dev);
   10681 	int rv;
   10682 
   10683 	if (sc->phy.acquire(sc)) {
   10684 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10685 		return -1;
   10686 	}
   10687 
   10688 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10689 	sc->phy.release(sc);
   10690 
   10691 	return rv;
   10692 }
   10693 
   10694 static int
   10695 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10696 {
   10697 	struct wm_softc *sc = device_private(dev);
   10698 	int rv;
   10699 
   10700 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10701 		switch (sc->sc_phytype) {
   10702 		case WMPHY_IGP:
   10703 		case WMPHY_IGP_2:
   10704 		case WMPHY_IGP_3:
   10705 			rv = wm_gmii_mdic_writereg(dev, phy,
   10706 			    MII_IGPHY_PAGE_SELECT, reg);
   10707 			if (rv != 0)
   10708 				return rv;
   10709 			break;
   10710 		default:
   10711 #ifdef WM_DEBUG
    10712 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x\n",
   10713 			    __func__, sc->sc_phytype, reg);
   10714 #endif
   10715 			break;
   10716 		}
   10717 	}
   10718 
   10719 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10720 }
   10721 
   10722 /*
   10723  * wm_gmii_i80003_readreg:	[mii interface function]
   10724  *
   10725  *	Read a PHY register on the kumeran
    10726  *	Read a PHY register on the Kumeran interface (80003).
    10727  * This could be handled by the PHY layer if we didn't have to lock the
    10728  * resource ...
   10729 static int
   10730 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10731 {
   10732 	struct wm_softc *sc = device_private(dev);
   10733 	int page_select;
   10734 	uint16_t temp, temp2;
   10735 	int rv = 0;
   10736 
   10737 	if (phy != 1) /* Only one PHY on kumeran bus */
   10738 		return -1;
   10739 
   10740 	if (sc->phy.acquire(sc)) {
   10741 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10742 		return -1;
   10743 	}
   10744 
   10745 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10746 		page_select = GG82563_PHY_PAGE_SELECT;
   10747 	else {
   10748 		/*
   10749 		 * Use Alternative Page Select register to access registers
   10750 		 * 30 and 31.
   10751 		 */
   10752 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10753 	}
   10754 	temp = reg >> GG82563_PAGE_SHIFT;
   10755 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10756 		goto out;
   10757 
   10758 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10759 		/*
    10760 		 * Wait another 200us to work around a bug with the ready bit
    10761 		 * in the MDIC register.
   10762 		 */
   10763 		delay(200);
   10764 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10765 		if ((rv != 0) || (temp2 != temp)) {
   10766 			device_printf(dev, "%s failed\n", __func__);
   10767 			rv = -1;
   10768 			goto out;
   10769 		}
   10770 		delay(200);
   10771 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10772 		delay(200);
   10773 	} else
   10774 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10775 
   10776 out:
   10777 	sc->phy.release(sc);
   10778 	return rv;
   10779 }
   10780 
   10781 /*
   10782  * wm_gmii_i80003_writereg:	[mii interface function]
   10783  *
   10784  *	Write a PHY register on the kumeran.
   10785  * This could be handled by the PHY layer if we didn't have to lock the
    10786  *	Write a PHY register on the Kumeran interface (80003).
    10787  * This could be handled by the PHY layer if we didn't have to lock the
    10788  * resource ...
   10789 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10790 {
   10791 	struct wm_softc *sc = device_private(dev);
   10792 	int page_select, rv;
   10793 	uint16_t temp, temp2;
   10794 
   10795 	if (phy != 1) /* Only one PHY on kumeran bus */
   10796 		return -1;
   10797 
   10798 	if (sc->phy.acquire(sc)) {
   10799 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10800 		return -1;
   10801 	}
   10802 
   10803 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10804 		page_select = GG82563_PHY_PAGE_SELECT;
   10805 	else {
   10806 		/*
   10807 		 * Use Alternative Page Select register to access registers
   10808 		 * 30 and 31.
   10809 		 */
   10810 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10811 	}
   10812 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10813 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10814 		goto out;
   10815 
   10816 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10817 		/*
    10818 		 * Wait another 200us to work around a bug with the ready bit
    10819 		 * in the MDIC register.
   10820 		 */
   10821 		delay(200);
   10822 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10823 		if ((rv != 0) || (temp2 != temp)) {
   10824 			device_printf(dev, "%s failed\n", __func__);
   10825 			rv = -1;
   10826 			goto out;
   10827 		}
   10828 		delay(200);
   10829 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10830 		delay(200);
   10831 	} else
   10832 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10833 
   10834 out:
   10835 	sc->phy.release(sc);
   10836 	return rv;
   10837 }
   10838 
   10839 /*
   10840  * wm_gmii_bm_readreg:	[mii interface function]
   10841  *
    10842  *	Read a PHY register on the BM PHY.
    10843  * This could be handled by the PHY layer if we didn't have to lock the
    10844  * resource ...
   10845  */
   10846 static int
   10847 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10848 {
   10849 	struct wm_softc *sc = device_private(dev);
   10850 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10851 	int rv;
   10852 
   10853 	if (sc->phy.acquire(sc)) {
   10854 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10855 		return -1;
   10856 	}
   10857 
   10858 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10859 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10860 		    || (reg == 31)) ? 1 : phy;
   10861 	/* Page 800 works differently than the rest so it has its own func */
   10862 	if (page == BM_WUC_PAGE) {
   10863 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10864 		goto release;
   10865 	}
   10866 
   10867 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10868 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10869 		    && (sc->sc_type != WM_T_82583))
   10870 			rv = wm_gmii_mdic_writereg(dev, phy,
   10871 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10872 		else
   10873 			rv = wm_gmii_mdic_writereg(dev, phy,
   10874 			    BME1000_PHY_PAGE_SELECT, page);
   10875 		if (rv != 0)
   10876 			goto release;
   10877 	}
   10878 
   10879 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10880 
   10881 release:
   10882 	sc->phy.release(sc);
   10883 	return rv;
   10884 }
   10885 
   10886 /*
   10887  * wm_gmii_bm_writereg:	[mii interface function]
   10888  *
    10889  *	Write a PHY register on the BM PHY.
    10890  * This could be handled by the PHY layer if we didn't have to lock the
    10891  * resource ...
   10892  */
   10893 static int
   10894 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10895 {
   10896 	struct wm_softc *sc = device_private(dev);
   10897 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10898 	int rv;
   10899 
   10900 	if (sc->phy.acquire(sc)) {
   10901 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10902 		return -1;
   10903 	}
   10904 
   10905 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10906 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10907 		    || (reg == 31)) ? 1 : phy;
   10908 	/* Page 800 works differently than the rest so it has its own func */
   10909 	if (page == BM_WUC_PAGE) {
   10910 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10911 		goto release;
   10912 	}
   10913 
   10914 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10915 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10916 		    && (sc->sc_type != WM_T_82583))
   10917 			rv = wm_gmii_mdic_writereg(dev, phy,
   10918 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10919 		else
   10920 			rv = wm_gmii_mdic_writereg(dev, phy,
   10921 			    BME1000_PHY_PAGE_SELECT, page);
   10922 		if (rv != 0)
   10923 			goto release;
   10924 	}
   10925 
   10926 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10927 
   10928 release:
   10929 	sc->phy.release(sc);
   10930 	return rv;
   10931 }
   10932 
   10933 /*
   10934  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10935  *  @dev: pointer to the HW structure
   10936  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10937  *
   10938  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10939  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10940  */
   10941 static int
   10942 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10943 {
   10944 	uint16_t temp;
   10945 	int rv;
   10946 
   10947 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10948 		device_xname(dev), __func__));
   10949 
   10950 	if (!phy_regp)
   10951 		return -1;
   10952 
   10953 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10954 
   10955 	/* Select Port Control Registers page */
   10956 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10957 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10958 	if (rv != 0)
   10959 		return rv;
   10960 
   10961 	/* Read WUCE and save it */
   10962 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10963 	if (rv != 0)
   10964 		return rv;
   10965 
   10966 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10967 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10968 	 */
   10969 	temp = *phy_regp;
   10970 	temp |= BM_WUC_ENABLE_BIT;
   10971 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10972 
   10973 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10974 		return rv;
   10975 
   10976 	/* Select Host Wakeup Registers page - caller now able to write
   10977 	 * registers on the Wakeup registers page
   10978 	 */
   10979 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10980 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10981 }
   10982 
   10983 /*
   10984  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10985  *  @dev: pointer to the HW structure
   10986  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10987  *
   10988  *  Restore BM_WUC_ENABLE_REG to its original value.
   10989  *
   10990  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10991  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10992  *  caller.
   10993  */
   10994 static int
   10995 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10996 {
   10997 
   10998 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10999 		device_xname(dev), __func__));
   11000 
   11001 	if (!phy_regp)
   11002 		return -1;
   11003 
   11004 	/* Select Port Control Registers page */
   11005 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11006 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11007 
   11008 	/* Restore 769.17 to its original value */
   11009 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11010 
   11011 	return 0;
   11012 }
   11013 
   11014 /*
   11015  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11016  *  @sc: pointer to the HW structure
   11017  *  @offset: register offset to be read or written
   11018  *  @val: pointer to the data to read or write
   11019  *  @rd: determines if operation is read or write
   11020  *  @page_set: BM_WUC_PAGE already set and access enabled
   11021  *
   11022  *  Read the PHY register at offset and store the retrieved information in
   11023  *  data, or write data to PHY register at offset.  Note the procedure to
   11024  *  access the PHY wakeup registers is different than reading the other PHY
   11025  *  registers. It works as such:
   11026  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11027  *  2) Set page to 800 for host (801 if we were manageability)
   11028  *  3) Write the address using the address opcode (0x11)
   11029  *  4) Read or write the data using the data opcode (0x12)
   11030  *  5) Restore 769.17.2 to its original value
   11031  *
   11032  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11033  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11034  *
   11035  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11036  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11037  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11038  */
   11039 static int
    11040 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11041 	bool page_set)
   11042 {
   11043 	struct wm_softc *sc = device_private(dev);
   11044 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11045 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11046 	uint16_t wuce;
   11047 	int rv = 0;
   11048 
   11049 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11050 		device_xname(dev), __func__));
   11051 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11052 	if ((sc->sc_type == WM_T_PCH)
   11053 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11054 		device_printf(dev,
   11055 		    "Attempting to access page %d while gig enabled.\n", page);
   11056 	}
   11057 
   11058 	if (!page_set) {
   11059 		/* Enable access to PHY wakeup registers */
   11060 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11061 		if (rv != 0) {
   11062 			device_printf(dev,
   11063 			    "%s: Could not enable PHY wakeup reg access\n",
   11064 			    __func__);
   11065 			return rv;
   11066 		}
   11067 	}
   11068 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11069 		device_xname(sc->sc_dev), __func__, page, regnum));
   11070 
   11071 	/*
    11072 	 * 3) and 4) Access the PHY wakeup registers: write the address
    11073 	 * with opcode 0x11, then read or write the data with opcode 0x12.
   11074 	 */
   11075 
   11076 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11077 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11078 	if (rv != 0)
   11079 		return rv;
   11080 
   11081 	if (rd) {
   11082 		/* Read the Wakeup register page value using opcode 0x12 */
   11083 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11084 	} else {
   11085 		/* Write the Wakeup register page value using opcode 0x12 */
   11086 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11087 	}
   11088 	if (rv != 0)
   11089 		return rv;
   11090 
   11091 	if (!page_set)
   11092 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11093 
   11094 	return rv;
   11095 }
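
/*
 * Illustrative sketch (not compiled): reading one Host Wakeup register
 * by hand with the helpers above, assuming the PHY semaphore is already
 * held.  "regnum" stands for the wakeup register number and "data" for
 * the result; wm_access_phy_wakeup_reg_bm() performs this sequence when
 * page_set is false.
 */
#if 0
	uint16_t wuce, data;

	/* Steps 1 and 2: save WUCE, enable access, select BM_WUC_PAGE. */
	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) == 0) {
		/* Step 3: write the register number with opcode 0x11. */
		wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
		/* Step 4: read the value with opcode 0x12. */
		wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, &data);
		/* Step 5: restore WUCE via the Port Control page. */
		wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
	}
#endif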
   11096 
   11097 /*
   11098  * wm_gmii_hv_readreg:	[mii interface function]
   11099  *
    11100  *	Read a PHY register on the HV (PCH and newer) PHY.
    11101  * This could be handled by the PHY layer if we didn't have to lock the
    11102  * resource ...
   11103  */
   11104 static int
   11105 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11106 {
   11107 	struct wm_softc *sc = device_private(dev);
   11108 	int rv;
   11109 
   11110 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11111 		device_xname(dev), __func__));
   11112 	if (sc->phy.acquire(sc)) {
   11113 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11114 		return -1;
   11115 	}
   11116 
   11117 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11118 	sc->phy.release(sc);
   11119 	return rv;
   11120 }
   11121 
   11122 static int
   11123 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11124 {
   11125 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11126 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11127 	int rv;
   11128 
   11129 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11130 
   11131 	/* Page 800 works differently than the rest so it has its own func */
   11132 	if (page == BM_WUC_PAGE)
   11133 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11134 
   11135 	/*
    11136 	 * Pages lower than 768 work differently than the rest and would
    11137 	 * need their own function; they are not handled here.
   11138 	 */
   11139 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11140 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11141 		return -1;
   11142 	}
   11143 
   11144 	/*
   11145 	 * XXX I21[789] documents say that the SMBus Address register is at
   11146 	 * PHY address 01, Page 0 (not 768), Register 26.
   11147 	 */
   11148 	if (page == HV_INTC_FC_PAGE_START)
   11149 		page = 0;
   11150 
   11151 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11152 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11153 		    page << BME1000_PAGE_SHIFT);
   11154 		if (rv != 0)
   11155 			return rv;
   11156 	}
   11157 
   11158 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11159 }
   11160 
   11161 /*
   11162  * wm_gmii_hv_writereg:	[mii interface function]
   11163  *
 *	Write a PHY register on the HV (PCH family) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11167  */
   11168 static int
   11169 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11170 {
   11171 	struct wm_softc *sc = device_private(dev);
   11172 	int rv;
   11173 
   11174 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11175 		device_xname(dev), __func__));
   11176 
   11177 	if (sc->phy.acquire(sc)) {
   11178 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11179 		return -1;
   11180 	}
   11181 
   11182 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11183 	sc->phy.release(sc);
   11184 
   11185 	return rv;
   11186 }
   11187 
   11188 static int
   11189 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11190 {
   11191 	struct wm_softc *sc = device_private(dev);
   11192 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11193 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11194 	int rv;
   11195 
   11196 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11197 
   11198 	/* Page 800 works differently than the rest so it has its own func */
   11199 	if (page == BM_WUC_PAGE)
   11200 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11201 		    false);
   11202 
	/*
	 * Pages below 768 (other than page 0) work differently from the
	 * rest and would need their own function; they are not supported.
	 */
   11207 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11208 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11209 		return -1;
   11210 	}
   11211 
	/*
	 * XXX I21[789] documents say that the SMBus Address register
	 * is at PHY address 01, Page 0 (not 768), Register 26.
	 */
	if (page == HV_INTC_FC_PAGE_START)
		page = 0;

	/*
	 * XXX Workaround MDIO accesses being disabled after entering
	 * IEEE Power Down (whenever bit 11 of the PHY control
	 * register is set)
	 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev >= 1)
		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
		    && ((val & (1 << 11)) != 0)) {
			device_printf(dev, "XXX need workaround\n");
		}
	}

	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
		rv = wm_gmii_mdic_writereg(dev, 1,
		    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
		if (rv != 0)
			return rv;
	}
   11243 
   11244 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11245 }
   11246 
   11247 /*
   11248  * wm_gmii_82580_readreg:	[mii interface function]
   11249  *
   11250  *	Read a PHY register on the 82580 and I350.
   11251  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11253  */
   11254 static int
   11255 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11256 {
   11257 	struct wm_softc *sc = device_private(dev);
   11258 	int rv;
   11259 
   11260 	if (sc->phy.acquire(sc) != 0) {
   11261 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11262 		return -1;
   11263 	}
   11264 
   11265 #ifdef DIAGNOSTIC
   11266 	if (reg > MII_ADDRMASK) {
   11267 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11268 		    __func__, sc->sc_phytype, reg);
   11269 		reg &= MII_ADDRMASK;
   11270 	}
   11271 #endif
   11272 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11273 
   11274 	sc->phy.release(sc);
   11275 	return rv;
   11276 }
   11277 
   11278 /*
   11279  * wm_gmii_82580_writereg:	[mii interface function]
   11280  *
   11281  *	Write a PHY register on the 82580 and I350.
   11282  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11284  */
   11285 static int
   11286 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11287 {
   11288 	struct wm_softc *sc = device_private(dev);
   11289 	int rv;
   11290 
   11291 	if (sc->phy.acquire(sc) != 0) {
   11292 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11293 		return -1;
   11294 	}
   11295 
   11296 #ifdef DIAGNOSTIC
   11297 	if (reg > MII_ADDRMASK) {
   11298 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11299 		    __func__, sc->sc_phytype, reg);
   11300 		reg &= MII_ADDRMASK;
   11301 	}
   11302 #endif
   11303 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11304 
   11305 	sc->phy.release(sc);
   11306 	return rv;
   11307 }
   11308 
   11309 /*
   11310  * wm_gmii_gs40g_readreg:	[mii interface function]
   11311  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11315  */
   11316 static int
   11317 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11318 {
   11319 	struct wm_softc *sc = device_private(dev);
   11320 	int page, offset;
   11321 	int rv;
   11322 
   11323 	/* Acquire semaphore */
   11324 	if (sc->phy.acquire(sc)) {
   11325 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11326 		return -1;
   11327 	}
   11328 
   11329 	/* Page select */
   11330 	page = reg >> GS40G_PAGE_SHIFT;
   11331 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11332 	if (rv != 0)
   11333 		goto release;
   11334 
   11335 	/* Read reg */
   11336 	offset = reg & GS40G_OFFSET_MASK;
   11337 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11338 
   11339 release:
   11340 	sc->phy.release(sc);
   11341 	return rv;
   11342 }
   11343 
   11344 /*
   11345  * wm_gmii_gs40g_writereg:	[mii interface function]
   11346  *
   11347  *	Write a PHY register on the I210 and I211.
   11348  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11350  */
   11351 static int
   11352 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11353 {
   11354 	struct wm_softc *sc = device_private(dev);
   11355 	uint16_t page;
   11356 	int offset, rv;
   11357 
   11358 	/* Acquire semaphore */
   11359 	if (sc->phy.acquire(sc)) {
   11360 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11361 		return -1;
   11362 	}
   11363 
   11364 	/* Page select */
   11365 	page = reg >> GS40G_PAGE_SHIFT;
   11366 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11367 	if (rv != 0)
   11368 		goto release;
   11369 
   11370 	/* Write reg */
   11371 	offset = reg & GS40G_OFFSET_MASK;
   11372 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11373 
   11374 release:
   11375 	/* Release semaphore */
   11376 	sc->phy.release(sc);
   11377 	return rv;
   11378 }
   11379 
   11380 /*
   11381  * wm_gmii_statchg:	[mii interface function]
   11382  *
   11383  *	Callback from MII layer when media changes.
   11384  */
   11385 static void
   11386 wm_gmii_statchg(struct ifnet *ifp)
   11387 {
   11388 	struct wm_softc *sc = ifp->if_softc;
   11389 	struct mii_data *mii = &sc->sc_mii;
   11390 
   11391 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11392 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11393 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11394 
   11395 	/* Get flow control negotiation result. */
   11396 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11397 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11398 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11399 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11400 	}
   11401 
   11402 	if (sc->sc_flowflags & IFM_FLOW) {
   11403 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11404 			sc->sc_ctrl |= CTRL_TFCE;
   11405 			sc->sc_fcrtl |= FCRTL_XONE;
   11406 		}
   11407 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11408 			sc->sc_ctrl |= CTRL_RFCE;
   11409 	}
   11410 
   11411 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11412 		DPRINTF(WM_DEBUG_LINK,
   11413 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11414 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11415 	} else {
   11416 		DPRINTF(WM_DEBUG_LINK,
   11417 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11418 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11419 	}
   11420 
   11421 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11422 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11423 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11424 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11425 	if (sc->sc_type == WM_T_80003) {
   11426 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11427 		case IFM_1000_T:
   11428 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11429 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11431 			break;
   11432 		default:
   11433 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11434 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11436 			break;
   11437 		}
   11438 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11439 	}
   11440 }
   11441 
   11442 /* kumeran related (80003, ICH* and PCH*) */
   11443 
   11444 /*
   11445  * wm_kmrn_readreg:
   11446  *
   11447  *	Read a kumeran register
   11448  */
   11449 static int
   11450 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11451 {
   11452 	int rv;
   11453 
   11454 	if (sc->sc_type == WM_T_80003)
   11455 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11456 	else
   11457 		rv = sc->phy.acquire(sc);
   11458 	if (rv != 0) {
   11459 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11460 		    __func__);
   11461 		return rv;
   11462 	}
   11463 
   11464 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11465 
   11466 	if (sc->sc_type == WM_T_80003)
   11467 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11468 	else
   11469 		sc->phy.release(sc);
   11470 
   11471 	return rv;
   11472 }
   11473 
   11474 static int
   11475 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11476 {
   11477 
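	/*
	 * Indirect access: write the register offset with the read-enable
	 * (REN) bit set, wait briefly, then read the 16-bit data back from
	 * the same KUMCTRLSTA register.
	 */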
   11478 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11479 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11480 	    KUMCTRLSTA_REN);
   11481 	CSR_WRITE_FLUSH(sc);
   11482 	delay(2);
   11483 
   11484 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11485 
   11486 	return 0;
   11487 }
   11488 
   11489 /*
   11490  * wm_kmrn_writereg:
   11491  *
   11492  *	Write a kumeran register
   11493  */
   11494 static int
   11495 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11496 {
   11497 	int rv;
   11498 
   11499 	if (sc->sc_type == WM_T_80003)
   11500 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11501 	else
   11502 		rv = sc->phy.acquire(sc);
   11503 	if (rv != 0) {
   11504 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11505 		    __func__);
   11506 		return rv;
   11507 	}
   11508 
   11509 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11510 
   11511 	if (sc->sc_type == WM_T_80003)
   11512 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11513 	else
   11514 		sc->phy.release(sc);
   11515 
   11516 	return rv;
   11517 }
   11518 
   11519 static int
   11520 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11521 {
   11522 
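	/*
	 * A write is a single KUMCTRLSTA access: the register offset and
	 * the 16-bit data go out together, with REN left clear.
	 */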
   11523 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11524 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11525 
   11526 	return 0;
   11527 }
   11528 
   11529 /*
   11530  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11531  * This access method is different from IEEE MMD.
   11532  */
   11533 static int
   11534 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11535 {
   11536 	struct wm_softc *sc = device_private(dev);
   11537 	int rv;
   11538 
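	/* Indirect access: write the EMI address register, then move data */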
   11539 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11540 	if (rv != 0)
   11541 		return rv;
   11542 
   11543 	if (rd)
   11544 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11545 	else
   11546 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11547 	return rv;
   11548 }
   11549 
   11550 static int
   11551 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11552 {
   11553 
   11554 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11555 }
   11556 
   11557 static int
   11558 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11559 {
   11560 
   11561 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11562 }
   11563 
   11564 /* SGMII related */
   11565 
   11566 /*
   11567  * wm_sgmii_uses_mdio
   11568  *
   11569  * Check whether the transaction is to the internal PHY or the external
   11570  * MDIO interface. Return true if it's MDIO.
   11571  */
   11572 static bool
   11573 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11574 {
   11575 	uint32_t reg;
   11576 	bool ismdio = false;
   11577 
   11578 	switch (sc->sc_type) {
   11579 	case WM_T_82575:
   11580 	case WM_T_82576:
   11581 		reg = CSR_READ(sc, WMREG_MDIC);
   11582 		ismdio = ((reg & MDIC_DEST) != 0);
   11583 		break;
   11584 	case WM_T_82580:
   11585 	case WM_T_I350:
   11586 	case WM_T_I354:
   11587 	case WM_T_I210:
   11588 	case WM_T_I211:
   11589 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11590 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11591 		break;
   11592 	default:
   11593 		break;
   11594 	}
   11595 
   11596 	return ismdio;
   11597 }
   11598 
   11599 /*
   11600  * wm_sgmii_readreg:	[mii interface function]
   11601  *
 *	Read a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11605  */
   11606 static int
   11607 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11608 {
   11609 	struct wm_softc *sc = device_private(dev);
   11610 	int rv;
   11611 
   11612 	if (sc->phy.acquire(sc)) {
   11613 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11614 		return -1;
   11615 	}
   11616 
   11617 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11618 
   11619 	sc->phy.release(sc);
   11620 	return rv;
   11621 }
   11622 
   11623 static int
   11624 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11625 {
   11626 	struct wm_softc *sc = device_private(dev);
   11627 	uint32_t i2ccmd;
	int i, rv = 0;
   11629 
   11630 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11631 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11632 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11633 
   11634 	/* Poll the ready bit */
   11635 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11636 		delay(50);
   11637 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11638 		if (i2ccmd & I2CCMD_READY)
   11639 			break;
   11640 	}
   11641 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11642 		device_printf(dev, "I2CCMD Read did not complete\n");
   11643 		rv = ETIMEDOUT;
   11644 	}
   11645 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11646 		device_printf(dev, "I2CCMD Error bit set\n");
   11647 		rv = EIO;
   11648 	}
   11649 
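	/*
	 * The two data bytes come back in the opposite byte order from what
	 * the host expects, so swap them here; wm_sgmii_writereg_locked()
	 * does the mirror-image swap on the way out.
	 */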
   11650 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11651 
   11652 	return rv;
   11653 }
   11654 
   11655 /*
   11656  * wm_sgmii_writereg:	[mii interface function]
   11657  *
   11658  *	Write a PHY register on the SGMII.
   11659  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11661  */
   11662 static int
   11663 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11664 {
   11665 	struct wm_softc *sc = device_private(dev);
   11666 	int rv;
   11667 
   11668 	if (sc->phy.acquire(sc) != 0) {
   11669 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11670 		return -1;
   11671 	}
   11672 
   11673 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11674 
   11675 	sc->phy.release(sc);
   11676 
   11677 	return rv;
   11678 }
   11679 
   11680 static int
   11681 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11682 {
   11683 	struct wm_softc *sc = device_private(dev);
   11684 	uint32_t i2ccmd;
   11685 	uint16_t swapdata;
   11686 	int rv = 0;
   11687 	int i;
   11688 
   11689 	/* Swap the data bytes for the I2C interface */
   11690 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11691 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11692 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11693 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11694 
   11695 	/* Poll the ready bit */
   11696 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11697 		delay(50);
   11698 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11699 		if (i2ccmd & I2CCMD_READY)
   11700 			break;
   11701 	}
   11702 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11703 		device_printf(dev, "I2CCMD Write did not complete\n");
   11704 		rv = ETIMEDOUT;
   11705 	}
   11706 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11707 		device_printf(dev, "I2CCMD Error bit set\n");
   11708 		rv = EIO;
   11709 	}
   11710 
   11711 	return rv;
   11712 }
   11713 
   11714 /* TBI related */
   11715 
   11716 static bool
   11717 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11718 {
   11719 	bool sig;
   11720 
   11721 	sig = ctrl & CTRL_SWDPIN(1);
   11722 
   11723 	/*
   11724 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11725 	 * detect a signal, 1 if they don't.
   11726 	 */
   11727 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11728 		sig = !sig;
   11729 
   11730 	return sig;
   11731 }
   11732 
   11733 /*
   11734  * wm_tbi_mediainit:
   11735  *
   11736  *	Initialize media for use on 1000BASE-X devices.
   11737  */
   11738 static void
   11739 wm_tbi_mediainit(struct wm_softc *sc)
   11740 {
   11741 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11742 	const char *sep = "";
   11743 
   11744 	if (sc->sc_type < WM_T_82543)
   11745 		sc->sc_tipg = TIPG_WM_DFLT;
   11746 	else
   11747 		sc->sc_tipg = TIPG_LG_DFLT;
   11748 
   11749 	sc->sc_tbi_serdes_anegticks = 5;
   11750 
   11751 	/* Initialize our media structures */
   11752 	sc->sc_mii.mii_ifp = ifp;
   11753 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11754 
   11755 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11756 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11757 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11758 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11759 	else
   11760 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11761 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11762 
   11763 	/*
   11764 	 * SWD Pins:
   11765 	 *
   11766 	 *	0 = Link LED (output)
   11767 	 *	1 = Loss Of Signal (input)
   11768 	 */
   11769 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11770 
   11771 	/* XXX Perhaps this is only for TBI */
   11772 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11773 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11774 
   11775 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11776 		sc->sc_ctrl &= ~CTRL_LRST;
   11777 
   11778 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11779 
   11780 #define	ADD(ss, mm, dd)							\
   11781 do {									\
   11782 	aprint_normal("%s%s", sep, ss);					\
   11783 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11784 	sep = ", ";							\
   11785 } while (/*CONSTCOND*/0)
   11786 
   11787 	aprint_normal_dev(sc->sc_dev, "");
   11788 
   11789 	if (sc->sc_type == WM_T_I354) {
   11790 		uint32_t status;
   11791 
   11792 		status = CSR_READ(sc, WMREG_STATUS);
   11793 		if (((status & STATUS_2P5_SKU) != 0)
   11794 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   11796 		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   11798 	} else if (sc->sc_type == WM_T_82545) {
   11799 		/* Only 82545 is LX (XXX except SFP) */
   11800 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11801 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11802 	} else {
   11803 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11804 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11805 	}
   11806 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11807 	aprint_normal("\n");
   11808 
   11809 #undef ADD
   11810 
   11811 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11812 }
   11813 
   11814 /*
   11815  * wm_tbi_mediachange:	[ifmedia interface function]
   11816  *
   11817  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11818  */
   11819 static int
   11820 wm_tbi_mediachange(struct ifnet *ifp)
   11821 {
   11822 	struct wm_softc *sc = ifp->if_softc;
   11823 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11824 	uint32_t status, ctrl;
   11825 	bool signal;
   11826 	int i;
   11827 
   11828 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11829 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11830 		/* XXX need some work for >= 82571 and < 82575 */
   11831 		if (sc->sc_type < WM_T_82575)
   11832 			return 0;
   11833 	}
   11834 
   11835 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11836 	    || (sc->sc_type >= WM_T_82575))
   11837 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11838 
   11839 	sc->sc_ctrl &= ~CTRL_LRST;
   11840 	sc->sc_txcw = TXCW_ANE;
   11841 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11842 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11843 	else if (ife->ifm_media & IFM_FDX)
   11844 		sc->sc_txcw |= TXCW_FD;
   11845 	else
   11846 		sc->sc_txcw |= TXCW_HD;
   11847 
   11848 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11849 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11850 
	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11852 		device_xname(sc->sc_dev), sc->sc_txcw));
   11853 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11854 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11855 	CSR_WRITE_FLUSH(sc);
   11856 	delay(1000);
   11857 
   11858 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11859 	signal = wm_tbi_havesignal(sc, ctrl);
   11860 
   11861 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11862 		signal));
   11863 
   11864 	if (signal) {
   11865 		/* Have signal; wait for the link to come up. */
   11866 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11867 			delay(10000);
   11868 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11869 				break;
   11870 		}
   11871 
		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   11873 			device_xname(sc->sc_dev), i));
   11874 
   11875 		status = CSR_READ(sc, WMREG_STATUS);
   11876 		DPRINTF(WM_DEBUG_LINK,
   11877 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11878 			device_xname(sc->sc_dev), status, STATUS_LU));
   11879 		if (status & STATUS_LU) {
   11880 			/* Link is up. */
   11881 			DPRINTF(WM_DEBUG_LINK,
   11882 			    ("%s: LINK: set media -> link up %s\n",
   11883 				device_xname(sc->sc_dev),
   11884 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11885 
   11886 			/*
			 * NOTE: The hardware updates TFCE and RFCE in CTRL
			 * automatically, so we should refresh sc->sc_ctrl.
   11889 			 */
   11890 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11891 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11892 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11893 			if (status & STATUS_FD)
   11894 				sc->sc_tctl |=
   11895 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11896 			else
   11897 				sc->sc_tctl |=
   11898 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11899 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11900 				sc->sc_fcrtl |= FCRTL_XONE;
   11901 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11902 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11903 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11904 			sc->sc_tbi_linkup = 1;
   11905 		} else {
   11906 			if (i == WM_LINKUP_TIMEOUT)
   11907 				wm_check_for_link(sc);
   11908 			/* Link is down. */
   11909 			DPRINTF(WM_DEBUG_LINK,
   11910 			    ("%s: LINK: set media -> link down\n",
   11911 				device_xname(sc->sc_dev)));
   11912 			sc->sc_tbi_linkup = 0;
   11913 		}
   11914 	} else {
   11915 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11916 			device_xname(sc->sc_dev)));
   11917 		sc->sc_tbi_linkup = 0;
   11918 	}
   11919 
   11920 	wm_tbi_serdes_set_linkled(sc);
   11921 
   11922 	return 0;
   11923 }
   11924 
   11925 /*
   11926  * wm_tbi_mediastatus:	[ifmedia interface function]
   11927  *
   11928  *	Get the current interface media status on a 1000BASE-X device.
   11929  */
   11930 static void
   11931 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11932 {
   11933 	struct wm_softc *sc = ifp->if_softc;
   11934 	uint32_t ctrl, status;
   11935 
   11936 	ifmr->ifm_status = IFM_AVALID;
   11937 	ifmr->ifm_active = IFM_ETHER;
   11938 
   11939 	status = CSR_READ(sc, WMREG_STATUS);
   11940 	if ((status & STATUS_LU) == 0) {
   11941 		ifmr->ifm_active |= IFM_NONE;
   11942 		return;
   11943 	}
   11944 
   11945 	ifmr->ifm_status |= IFM_ACTIVE;
   11946 	/* Only 82545 is LX */
   11947 	if (sc->sc_type == WM_T_82545)
   11948 		ifmr->ifm_active |= IFM_1000_LX;
   11949 	else
   11950 		ifmr->ifm_active |= IFM_1000_SX;
   11951 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11952 		ifmr->ifm_active |= IFM_FDX;
   11953 	else
   11954 		ifmr->ifm_active |= IFM_HDX;
   11955 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11956 	if (ctrl & CTRL_RFCE)
   11957 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11958 	if (ctrl & CTRL_TFCE)
   11959 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11960 }
   11961 
   11962 /* XXX TBI only */
   11963 static int
   11964 wm_check_for_link(struct wm_softc *sc)
   11965 {
   11966 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11967 	uint32_t rxcw;
   11968 	uint32_t ctrl;
   11969 	uint32_t status;
   11970 	bool signal;
   11971 
   11972 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11973 		device_xname(sc->sc_dev), __func__));
   11974 
   11975 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11976 		/* XXX need some work for >= 82571 */
   11977 		if (sc->sc_type >= WM_T_82571) {
   11978 			sc->sc_tbi_linkup = 1;
   11979 			return 0;
   11980 		}
   11981 	}
   11982 
   11983 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11984 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11985 	status = CSR_READ(sc, WMREG_STATUS);
   11986 	signal = wm_tbi_havesignal(sc, ctrl);
   11987 
   11988 	DPRINTF(WM_DEBUG_LINK,
   11989 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11990 		device_xname(sc->sc_dev), __func__, signal,
   11991 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11992 
   11993 	/*
   11994 	 * SWDPIN   LU RXCW
   11995 	 *	0    0	  0
   11996 	 *	0    0	  1	(should not happen)
   11997 	 *	0    1	  0	(should not happen)
   11998 	 *	0    1	  1	(should not happen)
	 *	1    0	  0	Disable autonegotiation and force link up
	 *	1    0	  1	Got /C/ but no link yet
	 *	1    1	  0	(link up)
	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
   12003 	 *
   12004 	 */
   12005 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12006 		DPRINTF(WM_DEBUG_LINK,
   12007 		    ("%s: %s: force linkup and fullduplex\n",
   12008 			device_xname(sc->sc_dev), __func__));
   12009 		sc->sc_tbi_linkup = 0;
   12010 		/* Disable auto-negotiation in the TXCW register */
   12011 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12012 
   12013 		/*
   12014 		 * Force link-up and also force full-duplex.
   12015 		 *
		 * NOTE: The hardware has updated TFCE and RFCE in CTRL
		 * automatically, so we should refresh sc->sc_ctrl.
   12018 		 */
   12019 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12020 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12021 	} else if (((status & STATUS_LU) != 0)
   12022 	    && ((rxcw & RXCW_C) != 0)
   12023 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12024 		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonegotiation\n",
			device_xname(sc->sc_dev), __func__));
   12028 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12029 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12030 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
			device_xname(sc->sc_dev), __func__));
   12033 	} else {
   12034 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12035 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12036 			status));
   12037 	}
   12038 
   12039 	return 0;
   12040 }
   12041 
   12042 /*
   12043  * wm_tbi_tick:
   12044  *
   12045  *	Check the link on TBI devices.
   12046  *	This function acts as mii_tick().
   12047  */
   12048 static void
   12049 wm_tbi_tick(struct wm_softc *sc)
   12050 {
   12051 	struct mii_data *mii = &sc->sc_mii;
   12052 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12053 	uint32_t status;
   12054 
   12055 	KASSERT(WM_CORE_LOCKED(sc));
   12056 
   12057 	status = CSR_READ(sc, WMREG_STATUS);
   12058 
   12059 	/* XXX is this needed? */
   12060 	(void)CSR_READ(sc, WMREG_RXCW);
   12061 	(void)CSR_READ(sc, WMREG_CTRL);
   12062 
   12063 	/* set link status */
   12064 	if ((status & STATUS_LU) == 0) {
   12065 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12066 			device_xname(sc->sc_dev)));
   12067 		sc->sc_tbi_linkup = 0;
   12068 	} else if (sc->sc_tbi_linkup == 0) {
   12069 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12070 			device_xname(sc->sc_dev),
   12071 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12072 		sc->sc_tbi_linkup = 1;
   12073 		sc->sc_tbi_serdes_ticks = 0;
   12074 	}
   12075 
   12076 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12077 		goto setled;
   12078 
   12079 	if ((status & STATUS_LU) == 0) {
   12080 		sc->sc_tbi_linkup = 0;
   12081 		/* If the timer expired, retry autonegotiation */
   12082 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12083 		    && (++sc->sc_tbi_serdes_ticks
   12084 			>= sc->sc_tbi_serdes_anegticks)) {
   12085 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12086 			sc->sc_tbi_serdes_ticks = 0;
   12087 			/*
   12088 			 * Reset the link, and let autonegotiation do
   12089 			 * its thing
   12090 			 */
   12091 			sc->sc_ctrl |= CTRL_LRST;
   12092 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12093 			CSR_WRITE_FLUSH(sc);
   12094 			delay(1000);
   12095 			sc->sc_ctrl &= ~CTRL_LRST;
   12096 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12097 			CSR_WRITE_FLUSH(sc);
   12098 			delay(1000);
   12099 			CSR_WRITE(sc, WMREG_TXCW,
   12100 			    sc->sc_txcw & ~TXCW_ANE);
   12101 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12102 		}
   12103 	}
   12104 
   12105 setled:
   12106 	wm_tbi_serdes_set_linkled(sc);
   12107 }
   12108 
   12109 /* SERDES related */
   12110 static void
   12111 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12112 {
   12113 	uint32_t reg;
   12114 
   12115 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12116 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12117 		return;
   12118 
   12119 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12120 	reg |= PCS_CFG_PCS_EN;
   12121 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12122 
   12123 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12124 	reg &= ~CTRL_EXT_SWDPIN(3);
   12125 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12126 	CSR_WRITE_FLUSH(sc);
   12127 }
   12128 
   12129 static int
   12130 wm_serdes_mediachange(struct ifnet *ifp)
   12131 {
   12132 	struct wm_softc *sc = ifp->if_softc;
   12133 	bool pcs_autoneg = true; /* XXX */
   12134 	uint32_t ctrl_ext, pcs_lctl, reg;
   12135 
   12136 	/* XXX Currently, this function is not called on 8257[12] */
   12137 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12138 	    || (sc->sc_type >= WM_T_82575))
   12139 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12140 
   12141 	wm_serdes_power_up_link_82575(sc);
   12142 
   12143 	sc->sc_ctrl |= CTRL_SLU;
   12144 
   12145 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12146 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12147 
   12148 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12149 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
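	/*
	 * The CTRL_EXT link-mode field selects between PCS autonegotiation
	 * (SGMII) and a forced 1000/full setup (1000KX and the default
	 * case; on 8257[56] a flag can also disable autonegotiation).
	 */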
   12150 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12151 	case CTRL_EXT_LINK_MODE_SGMII:
   12152 		pcs_autoneg = true;
   12153 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12154 		break;
   12155 	case CTRL_EXT_LINK_MODE_1000KX:
   12156 		pcs_autoneg = false;
   12157 		/* FALLTHROUGH */
   12158 	default:
   12159 		if ((sc->sc_type == WM_T_82575)
   12160 		    || (sc->sc_type == WM_T_82576)) {
   12161 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12162 				pcs_autoneg = false;
   12163 		}
   12164 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12165 		    | CTRL_FRCFDX;
   12166 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12167 	}
   12168 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12169 
   12170 	if (pcs_autoneg) {
   12171 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12172 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12173 
   12174 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12175 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12176 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12177 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12178 	} else
   12179 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12180 
	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);

   12184 	return 0;
   12185 }
   12186 
   12187 static void
   12188 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12189 {
   12190 	struct wm_softc *sc = ifp->if_softc;
   12191 	struct mii_data *mii = &sc->sc_mii;
   12192 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12193 	uint32_t pcs_adv, pcs_lpab, reg;
   12194 
   12195 	ifmr->ifm_status = IFM_AVALID;
   12196 	ifmr->ifm_active = IFM_ETHER;
   12197 
   12198 	/* Check PCS */
   12199 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12200 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12201 		ifmr->ifm_active |= IFM_NONE;
   12202 		sc->sc_tbi_linkup = 0;
   12203 		goto setled;
   12204 	}
   12205 
   12206 	sc->sc_tbi_linkup = 1;
   12207 	ifmr->ifm_status |= IFM_ACTIVE;
   12208 	if (sc->sc_type == WM_T_I354) {
   12209 		uint32_t status;
   12210 
   12211 		status = CSR_READ(sc, WMREG_STATUS);
   12212 		if (((status & STATUS_2P5_SKU) != 0)
   12213 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12214 			ifmr->ifm_active |= IFM_2500_KX;
   12215 		} else
   12216 			ifmr->ifm_active |= IFM_1000_KX;
   12217 	} else {
   12218 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12219 		case PCS_LSTS_SPEED_10:
   12220 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12221 			break;
   12222 		case PCS_LSTS_SPEED_100:
   12223 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12224 			break;
   12225 		case PCS_LSTS_SPEED_1000:
   12226 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12227 			break;
   12228 		default:
   12229 			device_printf(sc->sc_dev, "Unknown speed\n");
   12230 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12231 			break;
   12232 		}
   12233 	}
   12234 	if ((reg & PCS_LSTS_FDX) != 0)
   12235 		ifmr->ifm_active |= IFM_FDX;
   12236 	else
   12237 		ifmr->ifm_active |= IFM_HDX;
   12238 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12239 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12240 		/* Check flow */
   12241 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12242 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12243 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12244 			goto setled;
   12245 		}
   12246 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12247 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12248 		DPRINTF(WM_DEBUG_LINK,
   12249 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12250 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12251 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12252 			mii->mii_media_active |= IFM_FLOW
   12253 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12254 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12255 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12256 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12257 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12258 			mii->mii_media_active |= IFM_FLOW
   12259 			    | IFM_ETH_TXPAUSE;
   12260 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12261 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12262 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12263 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12264 			mii->mii_media_active |= IFM_FLOW
   12265 			    | IFM_ETH_RXPAUSE;
   12266 		}
   12267 	}
   12268 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12269 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12270 setled:
   12271 	wm_tbi_serdes_set_linkled(sc);
   12272 }
   12273 
   12274 /*
   12275  * wm_serdes_tick:
   12276  *
   12277  *	Check the link on serdes devices.
   12278  */
   12279 static void
   12280 wm_serdes_tick(struct wm_softc *sc)
   12281 {
   12282 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12283 	struct mii_data *mii = &sc->sc_mii;
   12284 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12285 	uint32_t reg;
   12286 
   12287 	KASSERT(WM_CORE_LOCKED(sc));
   12288 
   12289 	mii->mii_media_status = IFM_AVALID;
   12290 	mii->mii_media_active = IFM_ETHER;
   12291 
   12292 	/* Check PCS */
   12293 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12294 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12295 		mii->mii_media_status |= IFM_ACTIVE;
   12296 		sc->sc_tbi_linkup = 1;
   12297 		sc->sc_tbi_serdes_ticks = 0;
   12298 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12299 		if ((reg & PCS_LSTS_FDX) != 0)
   12300 			mii->mii_media_active |= IFM_FDX;
   12301 		else
   12302 			mii->mii_media_active |= IFM_HDX;
   12303 	} else {
   12304 		mii->mii_media_status |= IFM_NONE;
   12305 		sc->sc_tbi_linkup = 0;
   12306 		/* If the timer expired, retry autonegotiation */
   12307 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12308 		    && (++sc->sc_tbi_serdes_ticks
   12309 			>= sc->sc_tbi_serdes_anegticks)) {
   12310 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12311 			sc->sc_tbi_serdes_ticks = 0;
   12312 			/* XXX */
   12313 			wm_serdes_mediachange(ifp);
   12314 		}
   12315 	}
   12316 
   12317 	wm_tbi_serdes_set_linkled(sc);
   12318 }
   12319 
   12320 /* SFP related */
   12321 
   12322 static int
   12323 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12324 {
   12325 	uint32_t i2ccmd;
   12326 	int i;
   12327 
   12328 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12329 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12330 
   12331 	/* Poll the ready bit */
   12332 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12333 		delay(50);
   12334 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12335 		if (i2ccmd & I2CCMD_READY)
   12336 			break;
   12337 	}
   12338 	if ((i2ccmd & I2CCMD_READY) == 0)
   12339 		return -1;
   12340 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12341 		return -1;
   12342 
   12343 	*data = i2ccmd & 0x00ff;
   12344 
   12345 	return 0;
   12346 }
   12347 
   12348 static uint32_t
   12349 wm_sfp_get_media_type(struct wm_softc *sc)
   12350 {
   12351 	uint32_t ctrl_ext;
   12352 	uint8_t val = 0;
   12353 	int timeout = 3;
   12354 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12355 	int rv = -1;
   12356 
   12357 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12358 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12359 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12360 	CSR_WRITE_FLUSH(sc);
   12361 
   12362 	/* Read SFP module data */
   12363 	while (timeout) {
   12364 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12365 		if (rv == 0)
   12366 			break;
   12367 		delay(100*1000); /* XXX too big */
   12368 		timeout--;
   12369 	}
   12370 	if (rv != 0)
   12371 		goto out;
   12372 	switch (val) {
   12373 	case SFF_SFP_ID_SFF:
   12374 		aprint_normal_dev(sc->sc_dev,
   12375 		    "Module/Connector soldered to board\n");
   12376 		break;
   12377 	case SFF_SFP_ID_SFP:
   12378 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12379 		break;
   12380 	case SFF_SFP_ID_UNKNOWN:
   12381 		goto out;
   12382 	default:
   12383 		break;
   12384 	}
   12385 
   12386 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
	if (rv != 0)
		goto out;
   12390 
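	/*
	 * Map the SFF Ethernet compliance flags to a media type:
	 * 1000BASE-SX/LX modules run as SERDES, 1000BASE-T runs as copper
	 * over SGMII, and 100BASE-FX uses SGMII with SERDES media.
	 */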
   12391 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12392 		mediatype = WM_MEDIATYPE_SERDES;
   12393 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12394 		sc->sc_flags |= WM_F_SGMII;
   12395 		mediatype = WM_MEDIATYPE_COPPER;
   12396 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12397 		sc->sc_flags |= WM_F_SGMII;
   12398 		mediatype = WM_MEDIATYPE_SERDES;
   12399 	}
   12400 
   12401 out:
   12402 	/* Restore I2C interface setting */
   12403 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12404 
   12405 	return mediatype;
   12406 }
   12407 
   12408 /*
   12409  * NVM related.
   12410  * Microwire, SPI (w/wo EERD) and Flash.
   12411  */
   12412 
   12413 /* Both spi and uwire */
   12414 
   12415 /*
   12416  * wm_eeprom_sendbits:
   12417  *
   12418  *	Send a series of bits to the EEPROM.
   12419  */
   12420 static void
   12421 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12422 {
   12423 	uint32_t reg;
   12424 	int x;
   12425 
   12426 	reg = CSR_READ(sc, WMREG_EECD);
   12427 
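	/* Clock the bits out MSB first: present the bit on DI, pulse SK */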
   12428 	for (x = nbits; x > 0; x--) {
   12429 		if (bits & (1U << (x - 1)))
   12430 			reg |= EECD_DI;
   12431 		else
   12432 			reg &= ~EECD_DI;
   12433 		CSR_WRITE(sc, WMREG_EECD, reg);
   12434 		CSR_WRITE_FLUSH(sc);
   12435 		delay(2);
   12436 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12437 		CSR_WRITE_FLUSH(sc);
   12438 		delay(2);
   12439 		CSR_WRITE(sc, WMREG_EECD, reg);
   12440 		CSR_WRITE_FLUSH(sc);
   12441 		delay(2);
   12442 	}
   12443 }
   12444 
   12445 /*
   12446  * wm_eeprom_recvbits:
   12447  *
   12448  *	Receive a series of bits from the EEPROM.
   12449  */
   12450 static void
   12451 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12452 {
   12453 	uint32_t reg, val;
   12454 	int x;
   12455 
   12456 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12457 
   12458 	val = 0;
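	/* Clock the bits in MSB first: raise SK, sample DO, lower SK */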
   12459 	for (x = nbits; x > 0; x--) {
   12460 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12461 		CSR_WRITE_FLUSH(sc);
   12462 		delay(2);
   12463 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12464 			val |= (1U << (x - 1));
   12465 		CSR_WRITE(sc, WMREG_EECD, reg);
   12466 		CSR_WRITE_FLUSH(sc);
   12467 		delay(2);
   12468 	}
   12469 	*valp = val;
   12470 }
   12471 
   12472 /* Microwire */
   12473 
   12474 /*
   12475  * wm_nvm_read_uwire:
   12476  *
   12477  *	Read a word from the EEPROM using the MicroWire protocol.
   12478  */
   12479 static int
   12480 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12481 {
   12482 	uint32_t reg, val;
   12483 	int i;
   12484 
   12485 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12486 		device_xname(sc->sc_dev), __func__));
   12487 
   12488 	if (sc->nvm.acquire(sc) != 0)
   12489 		return -1;
   12490 
   12491 	for (i = 0; i < wordcnt; i++) {
   12492 		/* Clear SK and DI. */
   12493 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12494 		CSR_WRITE(sc, WMREG_EECD, reg);
   12495 
   12496 		/*
   12497 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12498 		 * and Xen.
   12499 		 *
   12500 		 * We use this workaround only for 82540 because qemu's
		 * e1000 acts as an 82540.
   12502 		 */
   12503 		if (sc->sc_type == WM_T_82540) {
   12504 			reg |= EECD_SK;
   12505 			CSR_WRITE(sc, WMREG_EECD, reg);
   12506 			reg &= ~EECD_SK;
   12507 			CSR_WRITE(sc, WMREG_EECD, reg);
   12508 			CSR_WRITE_FLUSH(sc);
   12509 			delay(2);
   12510 		}
   12511 		/* XXX: end of workaround */
   12512 
   12513 		/* Set CHIP SELECT. */
   12514 		reg |= EECD_CS;
   12515 		CSR_WRITE(sc, WMREG_EECD, reg);
   12516 		CSR_WRITE_FLUSH(sc);
   12517 		delay(2);
   12518 
   12519 		/* Shift in the READ command. */
   12520 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12521 
   12522 		/* Shift in address. */
   12523 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12524 
   12525 		/* Shift out the data. */
   12526 		wm_eeprom_recvbits(sc, &val, 16);
   12527 		data[i] = val & 0xffff;
   12528 
   12529 		/* Clear CHIP SELECT. */
   12530 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12531 		CSR_WRITE(sc, WMREG_EECD, reg);
   12532 		CSR_WRITE_FLUSH(sc);
   12533 		delay(2);
   12534 	}
   12535 
   12536 	sc->nvm.release(sc);
   12537 	return 0;
   12538 }
   12539 
   12540 /* SPI */
   12541 
   12542 /*
   12543  * Set SPI and FLASH related information from the EECD register.
   12544  * For 82541 and 82547, the word size is taken from EEPROM.
   12545  */
   12546 static int
   12547 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12548 {
   12549 	int size;
   12550 	uint32_t reg;
   12551 	uint16_t data;
   12552 
   12553 	reg = CSR_READ(sc, WMREG_EECD);
   12554 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12555 
   12556 	/* Read the size of NVM from EECD by default */
   12557 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
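	/*
	 * The size field is an exponent; after the per-family adjustment
	 * below, the word count is 1 << size.
	 */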
   12558 	switch (sc->sc_type) {
   12559 	case WM_T_82541:
   12560 	case WM_T_82541_2:
   12561 	case WM_T_82547:
   12562 	case WM_T_82547_2:
   12563 		/* Set dummy value to access EEPROM */
   12564 		sc->sc_nvm_wordsize = 64;
   12565 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12566 			aprint_error_dev(sc->sc_dev,
   12567 			    "%s: failed to read EEPROM size\n", __func__);
   12568 		}
   12569 		reg = data;
   12570 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12571 		if (size == 0)
   12572 			size = 6; /* 64 word size */
   12573 		else
   12574 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12575 		break;
   12576 	case WM_T_80003:
   12577 	case WM_T_82571:
   12578 	case WM_T_82572:
   12579 	case WM_T_82573: /* SPI case */
   12580 	case WM_T_82574: /* SPI case */
   12581 	case WM_T_82583: /* SPI case */
   12582 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12583 		if (size > 14)
   12584 			size = 14;
   12585 		break;
   12586 	case WM_T_82575:
   12587 	case WM_T_82576:
   12588 	case WM_T_82580:
   12589 	case WM_T_I350:
   12590 	case WM_T_I354:
   12591 	case WM_T_I210:
   12592 	case WM_T_I211:
   12593 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12594 		if (size > 15)
   12595 			size = 15;
   12596 		break;
   12597 	default:
   12598 		aprint_error_dev(sc->sc_dev,
   12599 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
   12602 	}
   12603 
   12604 	sc->sc_nvm_wordsize = 1 << size;
   12605 
   12606 	return 0;
   12607 }
   12608 
   12609 /*
   12610  * wm_nvm_ready_spi:
   12611  *
   12612  *	Wait for a SPI EEPROM to be ready for commands.
   12613  */
   12614 static int
   12615 wm_nvm_ready_spi(struct wm_softc *sc)
   12616 {
   12617 	uint32_t val;
   12618 	int usec;
   12619 
   12620 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12621 		device_xname(sc->sc_dev), __func__));
   12622 
   12623 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12624 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12625 		wm_eeprom_recvbits(sc, &val, 8);
   12626 		if ((val & SPI_SR_RDY) == 0)
   12627 			break;
   12628 	}
   12629 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12631 		return -1;
   12632 	}
   12633 	return 0;
   12634 }
   12635 
   12636 /*
   12637  * wm_nvm_read_spi:
   12638  *
 *	Read a word from the EEPROM using the SPI protocol.
   12640  */
   12641 static int
   12642 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12643 {
   12644 	uint32_t reg, val;
   12645 	int i;
   12646 	uint8_t opc;
   12647 	int rv = 0;
   12648 
   12649 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12650 		device_xname(sc->sc_dev), __func__));
   12651 
   12652 	if (sc->nvm.acquire(sc) != 0)
   12653 		return -1;
   12654 
   12655 	/* Clear SK and CS. */
   12656 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12657 	CSR_WRITE(sc, WMREG_EECD, reg);
   12658 	CSR_WRITE_FLUSH(sc);
   12659 	delay(2);
   12660 
   12661 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12662 		goto out;
   12663 
   12664 	/* Toggle CS to flush commands. */
   12665 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12666 	CSR_WRITE_FLUSH(sc);
   12667 	delay(2);
   12668 	CSR_WRITE(sc, WMREG_EECD, reg);
   12669 	CSR_WRITE_FLUSH(sc);
   12670 	delay(2);
   12671 
   12672 	opc = SPI_OPC_READ;
   12673 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12674 		opc |= SPI_OPC_A8;
   12675 
   12676 	wm_eeprom_sendbits(sc, opc, 8);
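	/*
	 * SPI EEPROMs are byte addressed, so convert the word address into
	 * a byte address by shifting it left by one.
	 */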
   12677 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12678 
   12679 	for (i = 0; i < wordcnt; i++) {
   12680 		wm_eeprom_recvbits(sc, &val, 16);
   12681 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12682 	}
   12683 
   12684 	/* Raise CS and clear SK. */
   12685 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12686 	CSR_WRITE(sc, WMREG_EECD, reg);
   12687 	CSR_WRITE_FLUSH(sc);
   12688 	delay(2);
   12689 
   12690 out:
   12691 	sc->nvm.release(sc);
   12692 	return rv;
   12693 }
   12694 
   12695 /* Using with EERD */
   12696 
   12697 static int
   12698 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12699 {
   12700 	uint32_t attempts = 100000;
   12701 	uint32_t i, reg = 0;
   12702 	int32_t done = -1;
   12703 
   12704 	for (i = 0; i < attempts; i++) {
   12705 		reg = CSR_READ(sc, rw);
   12706 
   12707 		if (reg & EERD_DONE) {
   12708 			done = 0;
   12709 			break;
   12710 		}
   12711 		delay(5);
   12712 	}
   12713 
   12714 	return done;
   12715 }
   12716 
   12717 static int
   12718 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12719 {
   12720 	int i, eerd = 0;
   12721 	int rv = 0;
   12722 
   12723 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12724 		device_xname(sc->sc_dev), __func__));
   12725 
   12726 	if (sc->nvm.acquire(sc) != 0)
   12727 		return -1;
   12728 
   12729 	for (i = 0; i < wordcnt; i++) {
   12730 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12731 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12732 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12733 		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12736 			break;
   12737 		}
   12738 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12739 	}
   12740 
   12741 	sc->nvm.release(sc);
   12742 	return rv;
   12743 }
   12744 
   12745 /* Flash */
   12746 
   12747 static int
   12748 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12749 {
   12750 	uint32_t eecd;
   12751 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12752 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12753 	uint32_t nvm_dword = 0;
   12754 	uint8_t sig_byte = 0;
   12755 	int rv;
   12756 
   12757 	switch (sc->sc_type) {
   12758 	case WM_T_PCH_SPT:
   12759 	case WM_T_PCH_CNP:
   12760 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12761 		act_offset = ICH_NVM_SIG_WORD * 2;
   12762 
   12763 		/* Set bank to 0 in case flash read fails. */
   12764 		*bank = 0;
   12765 
   12766 		/* Check bank 0 */
   12767 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12768 		if (rv != 0)
   12769 			return rv;
   12770 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12771 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12772 			*bank = 0;
   12773 			return 0;
   12774 		}
   12775 
   12776 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12780 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12781 			*bank = 1;
   12782 			return 0;
   12783 		}
   12784 		aprint_error_dev(sc->sc_dev,
   12785 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12786 		return -1;
   12787 	case WM_T_ICH8:
   12788 	case WM_T_ICH9:
   12789 		eecd = CSR_READ(sc, WMREG_EECD);
   12790 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12791 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12792 			return 0;
   12793 		}
   12794 		/* FALLTHROUGH */
   12795 	default:
   12796 		/* Default to 0 */
   12797 		*bank = 0;
   12798 
   12799 		/* Check bank 0 */
   12800 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12801 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12802 			*bank = 0;
   12803 			return 0;
   12804 		}
   12805 
   12806 		/* Check bank 1 */
   12807 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12808 		    &sig_byte);
   12809 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12810 			*bank = 1;
   12811 			return 0;
   12812 		}
   12813 	}
   12814 
   12815 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12816 		device_xname(sc->sc_dev)));
   12817 	return -1;
   12818 }
   12819 
   12820 /******************************************************************************
   12821  * This function does initial flash setup so that a new read/write/erase cycle
   12822  * can be started.
   12823  *
   12824  * sc - The pointer to the hw structure
   12825  ****************************************************************************/
   12826 static int32_t
   12827 wm_ich8_cycle_init(struct wm_softc *sc)
   12828 {
   12829 	uint16_t hsfsts;
   12830 	int32_t error = 1;
   12831 	int32_t i     = 0;
   12832 
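	/*
	 * On PCH_SPT and newer, HSFSTS and HSFCTL are accessed as a single
	 * 32-bit register; only the low 16 bits are HSFSTS.
	 */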
   12833 	if (sc->sc_type >= WM_T_PCH_SPT)
   12834 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12835 	else
   12836 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12837 
	/* Maybe check the Flash Descriptor Valid bit in HW status */
   12839 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12840 		return error;
   12841 
	/* Clear FCERR and DAEL in HW status by writing 1s */
   12844 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12845 
   12846 	if (sc->sc_type >= WM_T_PCH_SPT)
   12847 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12848 	else
   12849 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12850 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against, in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after hardware
	 * reset, which can then be used to tell whether a cycle is in
	 * progress or has been completed.  We should also have some software
	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit so
	 * that accesses by two threads can be serialized, or a way so that
	 * two threads don't start a cycle at the same time.
	 */
   12861 
   12862 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12863 		/*
   12864 		 * There is no cycle running at present, so we can start a
   12865 		 * cycle
   12866 		 */
   12867 
   12868 		/* Begin by setting Flash Cycle Done. */
   12869 		hsfsts |= HSFSTS_DONE;
   12870 		if (sc->sc_type >= WM_T_PCH_SPT)
   12871 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12872 			    hsfsts & 0xffffUL);
   12873 		else
   12874 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12875 		error = 0;
   12876 	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
   12881 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12882 			if (sc->sc_type >= WM_T_PCH_SPT)
   12883 				hsfsts = ICH8_FLASH_READ32(sc,
   12884 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12885 			else
   12886 				hsfsts = ICH8_FLASH_READ16(sc,
   12887 				    ICH_FLASH_HSFSTS);
   12888 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12889 				error = 0;
   12890 				break;
   12891 			}
   12892 			delay(1);
   12893 		}
   12894 		if (error == 0) {
			/*
			 * Successfully waited for the previous cycle to
			 * finish; now set the Flash Cycle Done bit.
			 */
   12899 			hsfsts |= HSFSTS_DONE;
   12900 			if (sc->sc_type >= WM_T_PCH_SPT)
   12901 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12902 				    hsfsts & 0xffffUL);
   12903 			else
   12904 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12905 				    hsfsts);
   12906 		}
   12907 	}
   12908 	return error;
   12909 }
   12910 
   12911 /******************************************************************************
   12912  * This function starts a flash cycle and waits for its completion
   12913  *
   12914  * sc - The pointer to the hw structure
   12915  ****************************************************************************/
   12916 static int32_t
   12917 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12918 {
   12919 	uint16_t hsflctl;
   12920 	uint16_t hsfsts;
   12921 	int32_t error = 1;
   12922 	uint32_t i = 0;
   12923 
   12924 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12925 	if (sc->sc_type >= WM_T_PCH_SPT)
   12926 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12927 	else
   12928 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12929 	hsflctl |= HSFCTL_GO;
   12930 	if (sc->sc_type >= WM_T_PCH_SPT)
   12931 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12932 		    (uint32_t)hsflctl << 16);
   12933 	else
   12934 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12935 
   12936 	/* Wait till FDONE bit is set to 1 */
   12937 	do {
   12938 		if (sc->sc_type >= WM_T_PCH_SPT)
   12939 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12940 			    & 0xffffUL;
   12941 		else
   12942 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12943 		if (hsfsts & HSFSTS_DONE)
   12944 			break;
   12945 		delay(1);
   12946 		i++;
   12947 	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   12949 		error = 0;
   12950 
   12951 	return error;
   12952 }
   12953 
   12954 /******************************************************************************
   12955  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12956  *
   12957  * sc - The pointer to the hw structure
   12958  * index - The index of the byte or word to read.
   12959  * size - Size of data to read, 1=byte 2=word, 4=dword
   12960  * data - Pointer to the word to store the value read.
   12961  *****************************************************************************/
   12962 static int32_t
   12963 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12964     uint32_t size, uint32_t *data)
   12965 {
   12966 	uint16_t hsfsts;
   12967 	uint16_t hsflctl;
   12968 	uint32_t flash_linear_address;
   12969 	uint32_t flash_data = 0;
   12970 	int32_t error = 1;
   12971 	int32_t count = 0;
   12972 
	if ((size != 1 && size != 2 && size != 4) || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12975 		return error;
   12976 
   12977 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12978 	    sc->sc_ich8_flash_base;
   12979 
   12980 	do {
   12981 		delay(1);
   12982 		/* Steps */
   12983 		error = wm_ich8_cycle_init(sc);
   12984 		if (error)
   12985 			break;
   12986 
   12987 		if (sc->sc_type >= WM_T_PCH_SPT)
   12988 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12989 			    >> 16;
   12990 		else
   12991 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* BCOUNT is the byte count minus 1 (0/1/3 = 1/2/4 bytes) */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
   12995 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12996 		if (sc->sc_type >= WM_T_PCH_SPT) {
			/*
			 * On SPT, this register is in LAN memory space, not
			 * flash. Therefore, only 32 bit access is supported.
			 */
   13001 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13002 			    (uint32_t)hsflctl << 16);
   13003 		} else
   13004 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13005 
   13006 		/*
   13007 		 * Write the last 24 bits of index into Flash Linear address
   13008 		 * field in Flash Address
   13009 		 */
		/* TODO: check the index against the size of the flash */
   13011 
   13012 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13013 
   13014 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13015 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read out (shift in) the
		 * Flash Data0 register, least significant byte first.
		 */
   13022 		if (error == 0) {
   13023 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13024 			if (size == 1)
   13025 				*data = (uint8_t)(flash_data & 0x000000FF);
   13026 			else if (size == 2)
   13027 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13028 			else if (size == 4)
   13029 				*data = (uint32_t)flash_data;
   13030 			break;
   13031 		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try,
			 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
   13038 			if (sc->sc_type >= WM_T_PCH_SPT)
   13039 				hsfsts = ICH8_FLASH_READ32(sc,
   13040 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13041 			else
   13042 				hsfsts = ICH8_FLASH_READ16(sc,
   13043 				    ICH_FLASH_HSFSTS);
   13044 
   13045 			if (hsfsts & HSFSTS_ERR) {
   13046 				/* Repeat for some time before giving up. */
   13047 				continue;
   13048 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13049 				break;
   13050 		}
   13051 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13052 
   13053 	return error;
   13054 }
   13055 
   13056 /******************************************************************************
   13057  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13058  *
 * sc - pointer to the wm_softc structure
   13060  * index - The index of the byte to read.
   13061  * data - Pointer to a byte to store the value read.
   13062  *****************************************************************************/
   13063 static int32_t
   13064 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13065 {
   13066 	int32_t status;
   13067 	uint32_t word = 0;
   13068 
   13069 	status = wm_read_ich8_data(sc, index, 1, &word);
   13070 	if (status == 0)
   13071 		*data = (uint8_t)word;
   13072 	else
   13073 		*data = 0;
   13074 
   13075 	return status;
   13076 }
   13077 
   13078 /******************************************************************************
   13079  * Reads a word from the NVM using the ICH8 flash access registers.
   13080  *
 * sc - pointer to the wm_softc structure
   13082  * index - The starting byte index of the word to read.
   13083  * data - Pointer to a word to store the value read.
   13084  *****************************************************************************/
   13085 static int32_t
   13086 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13087 {
   13088 	int32_t status;
   13089 	uint32_t word = 0;
   13090 
   13091 	status = wm_read_ich8_data(sc, index, 2, &word);
   13092 	if (status == 0)
   13093 		*data = (uint16_t)word;
   13094 	else
   13095 		*data = 0;
   13096 
   13097 	return status;
   13098 }
   13099 
   13100 /******************************************************************************
   13101  * Reads a dword from the NVM using the ICH8 flash access registers.
   13102  *
 * sc - pointer to the wm_softc structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   13106  *****************************************************************************/
   13107 static int32_t
   13108 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13109 {
   13110 	int32_t status;
   13111 
   13112 	status = wm_read_ich8_data(sc, index, 4, data);
   13113 	return status;
   13114 }
   13115 
   13116 /******************************************************************************
   13117  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13118  * register.
   13119  *
   13120  * sc - Struct containing variables accessed by shared code
   13121  * offset - offset of word in the EEPROM to read
   13122  * data - word read from the EEPROM
   13123  * words - number of words to read
   13124  *****************************************************************************/
   13125 static int
   13126 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13127 {
   13128 	int32_t	 rv = 0;
   13129 	uint32_t flash_bank = 0;
   13130 	uint32_t act_offset = 0;
   13131 	uint32_t bank_offset = 0;
   13132 	uint16_t word = 0;
   13133 	uint16_t i = 0;
   13134 
   13135 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13136 		device_xname(sc->sc_dev), __func__));
   13137 
   13138 	if (sc->nvm.acquire(sc) != 0)
   13139 		return -1;
   13140 
   13141 	/*
   13142 	 * We need to know which is the valid flash bank.  In the event
   13143 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13144 	 * managing flash_bank. So it cannot be trusted and needs
   13145 	 * to be updated with each read.
   13146 	 */
   13147 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13148 	if (rv) {
   13149 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13150 			device_xname(sc->sc_dev)));
   13151 		flash_bank = 0;
   13152 	}
   13153 
   13154 	/*
   13155 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13156 	 * size
   13157 	 */
   13158 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13159 
   13160 	for (i = 0; i < words; i++) {
   13161 		/* The NVM part needs a byte offset, hence * 2 */
   13162 		act_offset = bank_offset + ((offset + i) * 2);
   13163 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13164 		if (rv) {
   13165 			aprint_error_dev(sc->sc_dev,
   13166 			    "%s: failed to read NVM\n", __func__);
   13167 			break;
   13168 		}
   13169 		data[i] = word;
   13170 	}
   13171 
   13172 	sc->nvm.release(sc);
   13173 	return rv;
   13174 }
   13175 
   13176 /******************************************************************************
   13177  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13178  * register.
   13179  *
   13180  * sc - Struct containing variables accessed by shared code
   13181  * offset - offset of word in the EEPROM to read
   13182  * data - word read from the EEPROM
   13183  * words - number of words to read
   13184  *****************************************************************************/
   13185 static int
   13186 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13187 {
   13188 	int32_t	 rv = 0;
   13189 	uint32_t flash_bank = 0;
   13190 	uint32_t act_offset = 0;
   13191 	uint32_t bank_offset = 0;
   13192 	uint32_t dword = 0;
   13193 	uint16_t i = 0;
   13194 
   13195 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13196 		device_xname(sc->sc_dev), __func__));
   13197 
   13198 	if (sc->nvm.acquire(sc) != 0)
   13199 		return -1;
   13200 
   13201 	/*
   13202 	 * We need to know which is the valid flash bank.  In the event
   13203 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13204 	 * managing flash_bank. So it cannot be trusted and needs
   13205 	 * to be updated with each read.
   13206 	 */
   13207 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13208 	if (rv) {
   13209 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13210 			device_xname(sc->sc_dev)));
   13211 		flash_bank = 0;
   13212 	}
   13213 
   13214 	/*
   13215 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13216 	 * size
   13217 	 */
   13218 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13219 
   13220 	for (i = 0; i < words; i++) {
   13221 		/* The NVM part needs a byte offset, hence * 2 */
   13222 		act_offset = bank_offset + ((offset + i) * 2);
   13223 		/* but we must read dword aligned, so mask ... */
   13224 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13225 		if (rv) {
   13226 			aprint_error_dev(sc->sc_dev,
   13227 			    "%s: failed to read NVM\n", __func__);
   13228 			break;
   13229 		}
   13230 		/* ... and pick out low or high word */
   13231 		if ((act_offset & 0x2) == 0)
   13232 			data[i] = (uint16_t)(dword & 0xFFFF);
   13233 		else
   13234 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13235 	}
   13236 
   13237 	sc->nvm.release(sc);
   13238 	return rv;
   13239 }
   13240 
   13241 /* iNVM */
   13242 
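/******************************************************************************
 * Reads a word from the iNVM (integrated NVM) of the flash-less I210/I211.
 * The iNVM is scanned one dword record at a time: CSR-autoload and RSA-key
 * records are skipped by their fixed sizes, and a word-autoload record
 * whose word address matches carries the value we are looking for.
 *
 * sc - pointer to the wm_softc structure
 * address - word address to look up
 * data - Pointer to a word to store the value read.
 *****************************************************************************/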
   13243 static int
   13244 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13245 {
	int32_t	 rv = -1;	/* Fail unless the word is found */
   13247 	uint32_t invm_dword;
   13248 	uint16_t i;
   13249 	uint8_t record_type, word_address;
   13250 
   13251 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13252 		device_xname(sc->sc_dev), __func__));
   13253 
   13254 	for (i = 0; i < INVM_SIZE; i++) {
   13255 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13256 		/* Get record type */
   13257 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13258 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13259 			break;
   13260 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13261 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13262 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13263 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13264 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13265 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13266 			if (word_address == address) {
   13267 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13268 				rv = 0;
   13269 				break;
   13270 			}
   13271 		}
   13272 	}
   13273 
   13274 	return rv;
   13275 }
   13276 
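/******************************************************************************
 * Reads one or more words from the iNVM, substituting the documented I211
 * default values for words that are not programmed.  Except for the MAC
 * address offsets, the cases below effectively assume a single-word read
 * (words == 1), which matches how this driver calls the function.
 *
 * sc - pointer to the wm_softc structure
 * offset - offset of the word in the iNVM to read
 * words - number of words to read
 * data - words read from the iNVM
 *****************************************************************************/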
   13277 static int
   13278 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13279 {
   13280 	int rv = 0;
   13281 	int i;
   13282 
   13283 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13284 		device_xname(sc->sc_dev), __func__));
   13285 
   13286 	if (sc->nvm.acquire(sc) != 0)
   13287 		return -1;
   13288 
   13289 	for (i = 0; i < words; i++) {
   13290 		switch (offset + i) {
   13291 		case NVM_OFF_MACADDR:
   13292 		case NVM_OFF_MACADDR1:
   13293 		case NVM_OFF_MACADDR2:
   13294 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13295 			if (rv != 0) {
   13296 				data[i] = 0xffff;
   13297 				rv = -1;
   13298 			}
   13299 			break;
   13300 		case NVM_OFF_CFG2:
   13301 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13302 			if (rv != 0) {
   13303 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13304 				rv = 0;
   13305 			}
   13306 			break;
   13307 		case NVM_OFF_CFG4:
   13308 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13309 			if (rv != 0) {
   13310 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13311 				rv = 0;
   13312 			}
   13313 			break;
   13314 		case NVM_OFF_LED_1_CFG:
   13315 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13316 			if (rv != 0) {
   13317 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13318 				rv = 0;
   13319 			}
   13320 			break;
   13321 		case NVM_OFF_LED_0_2_CFG:
   13322 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13323 			if (rv != 0) {
   13324 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13325 				rv = 0;
   13326 			}
   13327 			break;
   13328 		case NVM_OFF_ID_LED_SETTINGS:
   13329 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13330 			if (rv != 0) {
   13331 				*data = ID_LED_RESERVED_FFFF;
   13332 				rv = 0;
   13333 			}
   13334 			break;
   13335 		default:
   13336 			DPRINTF(WM_DEBUG_NVM,
   13337 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13338 			*data = NVM_RESERVED_WORD;
   13339 			break;
   13340 		}
   13341 	}
   13342 
   13343 	sc->nvm.release(sc);
   13344 	return rv;
   13345 }
   13346 
/* Locking, NVM type detection, checksum validation, versioning and read */
   13348 
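/*
 * Check whether the NVM is an onboard EEPROM or a Flash part: on
 * 82573/82574/82583, EECD reports the attached NVM type; all other chips
 * handled here are treated as EEPROM (return 1).
 */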
   13349 static int
   13350 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13351 {
   13352 	uint32_t eecd = 0;
   13353 
   13354 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13355 	    || sc->sc_type == WM_T_82583) {
   13356 		eecd = CSR_READ(sc, WMREG_EECD);
   13357 
   13358 		/* Isolate bits 15 & 16 */
   13359 		eecd = ((eecd >> 15) & 0x03);
   13360 
   13361 		/* If both bits are set, device is Flash type */
   13362 		if (eecd == 0x03)
   13363 			return 0;
   13364 	}
   13365 	return 1;
   13366 }
   13367 
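/*
 * Check whether an external flash part is present on I210/I211; if not,
 * the chip runs from its iNVM alone.
 */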
   13368 static int
   13369 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13370 {
   13371 	uint32_t eec;
   13372 
   13373 	eec = CSR_READ(sc, WMREG_EEC);
   13374 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13375 		return 1;
   13376 
   13377 	return 0;
   13378 }
   13379 
   13380 /*
   13381  * wm_nvm_validate_checksum
   13382  *
   13383  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13384  */
   13385 static int
   13386 wm_nvm_validate_checksum(struct wm_softc *sc)
   13387 {
   13388 	uint16_t checksum;
   13389 	uint16_t eeprom_data;
   13390 #ifdef WM_DEBUG
   13391 	uint16_t csum_wordaddr, valid_checksum;
   13392 #endif
   13393 	int i;
   13394 
   13395 	checksum = 0;
   13396 
   13397 	/* Don't check for I211 */
   13398 	if (sc->sc_type == WM_T_I211)
   13399 		return 0;
   13400 
   13401 #ifdef WM_DEBUG
   13402 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13403 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13404 		csum_wordaddr = NVM_OFF_COMPAT;
   13405 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13406 	} else {
   13407 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13408 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13409 	}
   13410 
   13411 	/* Dump EEPROM image for debug */
   13412 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13413 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13414 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13415 		/* XXX PCH_SPT? */
   13416 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13417 		if ((eeprom_data & valid_checksum) == 0)
   13418 			DPRINTF(WM_DEBUG_NVM,
   13419 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13420 				device_xname(sc->sc_dev), eeprom_data,
   13421 				    valid_checksum));
   13422 	}
   13423 
   13424 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13425 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13426 		for (i = 0; i < NVM_SIZE; i++) {
   13427 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13428 				printf("XXXX ");
   13429 			else
   13430 				printf("%04hx ", eeprom_data);
   13431 			if (i % 8 == 7)
   13432 				printf("\n");
   13433 		}
   13434 	}
   13435 
   13436 #endif /* WM_DEBUG */
   13437 
   13438 	for (i = 0; i < NVM_SIZE; i++) {
   13439 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13440 			return 1;
   13441 		checksum += eeprom_data;
   13442 	}
   13443 
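	/*
	 * Note that a checksum mismatch is deliberately not treated as
	 * fatal: only a debug message is printed and the NVM contents are
	 * still used.
	 */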
   13444 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13445 #ifdef WM_DEBUG
   13446 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13447 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13448 #endif
   13449 	}
   13450 
   13451 	return 0;
   13452 }
   13453 
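/* Decode the iNVM image version (I210/I211) from iNVM word 61. */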
   13454 static void
   13455 wm_nvm_version_invm(struct wm_softc *sc)
   13456 {
   13457 	uint32_t dword;
   13458 
   13459 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document
	 * describes.  Perhaps it's not perfect though...
   13463 	 *
   13464 	 * Example:
   13465 	 *
   13466 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13467 	 */
   13468 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13469 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13470 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13471 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13472 }
   13473 
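/*
 * Decode the NVM image version and print it (and, when available, the
 * option ROM version and the image unique ID) during attach.
 */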
   13474 static void
   13475 wm_nvm_version(struct wm_softc *sc)
   13476 {
   13477 	uint16_t major, minor, build, patch;
   13478 	uint16_t uid0, uid1;
   13479 	uint16_t nvm_data;
   13480 	uint16_t off;
   13481 	bool check_version = false;
   13482 	bool check_optionrom = false;
   13483 	bool have_build = false;
   13484 	bool have_uid = true;
   13485 
   13486 	/*
   13487 	 * Version format:
   13488 	 *
   13489 	 * XYYZ
   13490 	 * X0YZ
   13491 	 * X0YY
   13492 	 *
   13493 	 * Example:
   13494 	 *
   13495 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13496 	 *	82571	0x50a6	5.10.6?
   13497 	 *	82572	0x506a	5.6.10?
   13498 	 *	82572EI	0x5069	5.6.9?
   13499 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13500 	 *		0x2013	2.1.3?
   13501 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13502 	 * ICH8+82567	0x0040	0.4.0?
   13503 	 * ICH9+82566	0x1040	1.4.0?
   13504 	 *ICH10+82567	0x0043	0.4.3?
   13505 	 *  PCH+82577	0x00c1	0.12.1?
   13506 	 * PCH2+82579	0x00d3	0.13.3?
   13507 	 *		0x00d4	0.13.4?
   13508 	 *  LPT+I218	0x0023	0.2.3?
   13509 	 *  SPT+I219	0x0084	0.8.4?
   13510 	 *  CNP+I219	0x0054	0.5.4?
   13511 	 */
   13512 
   13513 	/*
   13514 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13517 	 */
   13518 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13519 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13520 		have_uid = false;
   13521 
   13522 	switch (sc->sc_type) {
   13523 	case WM_T_82571:
   13524 	case WM_T_82572:
   13525 	case WM_T_82574:
   13526 	case WM_T_82583:
   13527 		check_version = true;
   13528 		check_optionrom = true;
   13529 		have_build = true;
   13530 		break;
   13531 	case WM_T_ICH8:
   13532 	case WM_T_ICH9:
   13533 	case WM_T_ICH10:
   13534 	case WM_T_PCH:
   13535 	case WM_T_PCH2:
   13536 	case WM_T_PCH_LPT:
   13537 	case WM_T_PCH_SPT:
   13538 	case WM_T_PCH_CNP:
   13539 		check_version = true;
   13540 		have_build = true;
   13541 		have_uid = false;
   13542 		break;
   13543 	case WM_T_82575:
   13544 	case WM_T_82576:
   13545 	case WM_T_82580:
   13546 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13547 			check_version = true;
   13548 		break;
   13549 	case WM_T_I211:
   13550 		wm_nvm_version_invm(sc);
   13551 		have_uid = false;
   13552 		goto printver;
   13553 	case WM_T_I210:
   13554 		if (!wm_nvm_flash_presence_i210(sc)) {
   13555 			wm_nvm_version_invm(sc);
   13556 			have_uid = false;
   13557 			goto printver;
   13558 		}
   13559 		/* FALLTHROUGH */
   13560 	case WM_T_I350:
   13561 	case WM_T_I354:
   13562 		check_version = true;
   13563 		check_optionrom = true;
   13564 		break;
   13565 	default:
   13566 		return;
   13567 	}
   13568 	if (check_version
   13569 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13570 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13571 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13572 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13573 			build = nvm_data & NVM_BUILD_MASK;
   13574 			have_build = true;
   13575 		} else
   13576 			minor = nvm_data & 0x00ff;
   13577 
		/* The minor number is BCD; convert it to decimal */
		minor = (minor / 16) * 10 + (minor % 16);
   13580 		sc->sc_nvm_ver_major = major;
   13581 		sc->sc_nvm_ver_minor = minor;
   13582 
   13583 printver:
   13584 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13585 		    sc->sc_nvm_ver_minor);
   13586 		if (have_build) {
   13587 			sc->sc_nvm_ver_build = build;
   13588 			aprint_verbose(".%d", build);
   13589 		}
   13590 	}
   13591 
	/* Assume the Option ROM area is above NVM_SIZE */
   13593 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13594 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13595 		/* Option ROM Version */
   13596 		if ((off != 0x0000) && (off != 0xffff)) {
   13597 			int rv;
   13598 
   13599 			off += NVM_COMBO_VER_OFF;
   13600 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13601 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13602 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13603 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13604 				/* 16bits */
   13605 				major = uid0 >> 8;
   13606 				build = (uid0 << 8) | (uid1 >> 8);
   13607 				patch = uid1 & 0x00ff;
   13608 				aprint_verbose(", option ROM Version %d.%d.%d",
   13609 				    major, build, patch);
   13610 			}
   13611 		}
   13612 	}
   13613 
   13614 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13615 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13616 }
   13617 
   13618 /*
   13619  * wm_nvm_read:
   13620  *
   13621  *	Read data from the serial EEPROM.
   13622  */
   13623 static int
   13624 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13625 {
   13626 	int rv;
   13627 
   13628 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13629 		device_xname(sc->sc_dev), __func__));
   13630 
   13631 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13632 		return -1;
   13633 
   13634 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13635 
   13636 	return rv;
   13637 }
   13638 
   13639 /*
   13640  * Hardware semaphores.
 * Very complex...
   13642  */
   13643 
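/*
 * Null acquire/release pair for controllers whose NVM access needs no
 * locking against firmware or the other LAN function.
 */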
   13644 static int
   13645 wm_get_null(struct wm_softc *sc)
   13646 {
   13647 
   13648 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13649 		device_xname(sc->sc_dev), __func__));
   13650 	return 0;
   13651 }
   13652 
   13653 static void
   13654 wm_put_null(struct wm_softc *sc)
   13655 {
   13656 
   13657 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13658 		device_xname(sc->sc_dev), __func__));
   13659 	return;
   13660 }
   13661 
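/*
 * Acquire direct EECD (EEPROM bit-bang) access: set the request bit and
 * wait for the hardware to grant it.
 */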
   13662 static int
   13663 wm_get_eecd(struct wm_softc *sc)
   13664 {
   13665 	uint32_t reg;
   13666 	int x;
   13667 
   13668 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13669 		device_xname(sc->sc_dev), __func__));
   13670 
   13671 	reg = CSR_READ(sc, WMREG_EECD);
   13672 
   13673 	/* Request EEPROM access. */
   13674 	reg |= EECD_EE_REQ;
   13675 	CSR_WRITE(sc, WMREG_EECD, reg);
   13676 
   13677 	/* ..and wait for it to be granted. */
   13678 	for (x = 0; x < 1000; x++) {
   13679 		reg = CSR_READ(sc, WMREG_EECD);
   13680 		if (reg & EECD_EE_GNT)
   13681 			break;
   13682 		delay(5);
   13683 	}
   13684 	if ((reg & EECD_EE_GNT) == 0) {
   13685 		aprint_error_dev(sc->sc_dev,
   13686 		    "could not acquire EEPROM GNT\n");
   13687 		reg &= ~EECD_EE_REQ;
   13688 		CSR_WRITE(sc, WMREG_EECD, reg);
   13689 		return -1;
   13690 	}
   13691 
   13692 	return 0;
   13693 }
   13694 
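/*
 * Raise or lower the clock (SK) line of the bit-banged serial EEPROM
 * interface.  SPI parts use a much shorter clock period than Microwire
 * parts, hence the two delays.
 */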
   13695 static void
   13696 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13697 {
   13698 
   13699 	*eecd |= EECD_SK;
   13700 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13701 	CSR_WRITE_FLUSH(sc);
   13702 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13703 		delay(1);
   13704 	else
   13705 		delay(50);
   13706 }
   13707 
   13708 static void
   13709 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13710 {
   13711 
   13712 	*eecd &= ~EECD_SK;
   13713 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13714 	CSR_WRITE_FLUSH(sc);
   13715 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13716 		delay(1);
   13717 	else
   13718 		delay(50);
   13719 }
   13720 
   13721 static void
   13722 wm_put_eecd(struct wm_softc *sc)
   13723 {
   13724 	uint32_t reg;
   13725 
   13726 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13727 		device_xname(sc->sc_dev), __func__));
   13728 
   13729 	/* Stop nvm */
   13730 	reg = CSR_READ(sc, WMREG_EECD);
   13731 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13732 		/* Pull CS high */
   13733 		reg |= EECD_CS;
   13734 		wm_nvm_eec_clock_lower(sc, &reg);
   13735 	} else {
   13736 		/* CS on Microwire is active-high */
   13737 		reg &= ~(EECD_CS | EECD_DI);
   13738 		CSR_WRITE(sc, WMREG_EECD, reg);
   13739 		wm_nvm_eec_clock_raise(sc, &reg);
   13740 		wm_nvm_eec_clock_lower(sc, &reg);
   13741 	}
   13742 
   13743 	reg = CSR_READ(sc, WMREG_EECD);
   13744 	reg &= ~EECD_EE_REQ;
   13745 	CSR_WRITE(sc, WMREG_EECD, reg);
   13746 
   13747 	return;
   13748 }
   13749 
   13750 /*
   13751  * Get hardware semaphore.
   13752  * Same as e1000_get_hw_semaphore_generic()
   13753  */
   13754 static int
   13755 wm_get_swsm_semaphore(struct wm_softc *sc)
   13756 {
   13757 	int32_t timeout;
   13758 	uint32_t swsm;
   13759 
   13760 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13761 		device_xname(sc->sc_dev), __func__));
   13762 	KASSERT(sc->sc_nvm_wordsize > 0);
   13763 
   13764 retry:
   13765 	/* Get the SW semaphore. */
   13766 	timeout = sc->sc_nvm_wordsize + 1;
   13767 	while (timeout) {
   13768 		swsm = CSR_READ(sc, WMREG_SWSM);
   13769 
   13770 		if ((swsm & SWSM_SMBI) == 0)
   13771 			break;
   13772 
   13773 		delay(50);
   13774 		timeout--;
   13775 	}
   13776 
   13777 	if (timeout == 0) {
   13778 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13779 			/*
   13780 			 * In rare circumstances, the SW semaphore may already
   13781 			 * be held unintentionally. Clear the semaphore once
   13782 			 * before giving up.
   13783 			 */
   13784 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13785 			wm_put_swsm_semaphore(sc);
   13786 			goto retry;
   13787 		}
   13788 		aprint_error_dev(sc->sc_dev,
   13789 		    "could not acquire SWSM SMBI\n");
   13790 		return 1;
   13791 	}
   13792 
   13793 	/* Get the FW semaphore. */
   13794 	timeout = sc->sc_nvm_wordsize + 1;
   13795 	while (timeout) {
   13796 		swsm = CSR_READ(sc, WMREG_SWSM);
   13797 		swsm |= SWSM_SWESMBI;
   13798 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13799 		/* If we managed to set the bit we got the semaphore. */
   13800 		swsm = CSR_READ(sc, WMREG_SWSM);
   13801 		if (swsm & SWSM_SWESMBI)
   13802 			break;
   13803 
   13804 		delay(50);
   13805 		timeout--;
   13806 	}
   13807 
   13808 	if (timeout == 0) {
   13809 		aprint_error_dev(sc->sc_dev,
   13810 		    "could not acquire SWSM SWESMBI\n");
   13811 		/* Release semaphores */
   13812 		wm_put_swsm_semaphore(sc);
   13813 		return 1;
   13814 	}
   13815 	return 0;
   13816 }
   13817 
   13818 /*
   13819  * Put hardware semaphore.
   13820  * Same as e1000_put_hw_semaphore_generic()
   13821  */
   13822 static void
   13823 wm_put_swsm_semaphore(struct wm_softc *sc)
   13824 {
   13825 	uint32_t swsm;
   13826 
   13827 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13828 		device_xname(sc->sc_dev), __func__));
   13829 
   13830 	swsm = CSR_READ(sc, WMREG_SWSM);
   13831 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13832 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13833 }
   13834 
   13835 /*
   13836  * Get SW/FW semaphore.
   13837  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13838  */
   13839 static int
   13840 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13841 {
   13842 	uint32_t swfw_sync;
   13843 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13844 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13845 	int timeout;
   13846 
   13847 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13848 		device_xname(sc->sc_dev), __func__));
   13849 
   13850 	if (sc->sc_type == WM_T_80003)
   13851 		timeout = 50;
   13852 	else
   13853 		timeout = 200;
   13854 
   13855 	while (timeout) {
   13856 		if (wm_get_swsm_semaphore(sc)) {
   13857 			aprint_error_dev(sc->sc_dev,
   13858 			    "%s: failed to get semaphore\n",
   13859 			    __func__);
   13860 			return 1;
   13861 		}
   13862 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13863 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13864 			swfw_sync |= swmask;
   13865 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13866 			wm_put_swsm_semaphore(sc);
   13867 			return 0;
   13868 		}
   13869 		wm_put_swsm_semaphore(sc);
   13870 		delay(5000);
   13871 		timeout--;
   13872 	}
   13873 	device_printf(sc->sc_dev,
   13874 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13875 	    mask, swfw_sync);
   13876 	return 1;
   13877 }
   13878 
   13879 static void
   13880 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13881 {
   13882 	uint32_t swfw_sync;
   13883 
   13884 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13885 		device_xname(sc->sc_dev), __func__));
   13886 
   13887 	while (wm_get_swsm_semaphore(sc) != 0)
   13888 		continue;
   13889 
   13890 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13891 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13892 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13893 
   13894 	wm_put_swsm_semaphore(sc);
   13895 }
   13896 
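/*
 * NVM acquire for 80003: take the EEPROM SW/FW semaphore and, if the
 * controller requires it, the direct EECD grant as well.
 */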
   13897 static int
   13898 wm_get_nvm_80003(struct wm_softc *sc)
   13899 {
   13900 	int rv;
   13901 
   13902 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13903 		device_xname(sc->sc_dev), __func__));
   13904 
   13905 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13906 		aprint_error_dev(sc->sc_dev,
   13907 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   13908 		return rv;
   13909 	}
   13910 
   13911 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13912 	    && (rv = wm_get_eecd(sc)) != 0) {
   13913 		aprint_error_dev(sc->sc_dev,
   13914 		    "%s: failed to get semaphore(EECD)\n", __func__);
   13915 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13916 		return rv;
   13917 	}
   13918 
   13919 	return 0;
   13920 }
   13921 
   13922 static void
   13923 wm_put_nvm_80003(struct wm_softc *sc)
   13924 {
   13925 
   13926 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13927 		device_xname(sc->sc_dev), __func__));
   13928 
   13929 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13930 		wm_put_eecd(sc);
   13931 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13932 }
   13933 
   13934 static int
   13935 wm_get_nvm_82571(struct wm_softc *sc)
   13936 {
   13937 	int rv;
   13938 
   13939 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13940 		device_xname(sc->sc_dev), __func__));
   13941 
   13942 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13943 		return rv;
   13944 
   13945 	switch (sc->sc_type) {
   13946 	case WM_T_82573:
   13947 		break;
   13948 	default:
   13949 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13950 			rv = wm_get_eecd(sc);
   13951 		break;
   13952 	}
   13953 
   13954 	if (rv != 0) {
   13955 		aprint_error_dev(sc->sc_dev,
   13956 		    "%s: failed to get semaphore\n",
   13957 		    __func__);
   13958 		wm_put_swsm_semaphore(sc);
   13959 	}
   13960 
   13961 	return rv;
   13962 }
   13963 
   13964 static void
   13965 wm_put_nvm_82571(struct wm_softc *sc)
   13966 {
   13967 
   13968 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13969 		device_xname(sc->sc_dev), __func__));
   13970 
   13971 	switch (sc->sc_type) {
   13972 	case WM_T_82573:
   13973 		break;
   13974 	default:
   13975 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13976 			wm_put_eecd(sc);
   13977 		break;
   13978 	}
   13979 
   13980 	wm_put_swsm_semaphore(sc);
   13981 }
   13982 
   13983 static int
   13984 wm_get_phy_82575(struct wm_softc *sc)
   13985 {
   13986 
   13987 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13988 		device_xname(sc->sc_dev), __func__));
   13989 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13990 }
   13991 
   13992 static void
   13993 wm_put_phy_82575(struct wm_softc *sc)
   13994 {
   13995 
   13996 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13997 		device_xname(sc->sc_dev), __func__));
   13998 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13999 }
   14000 
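/*
 * Get the SW/FW/HW semaphore (ICH8 and newer): claim MDIO software
 * ownership in EXTCNFCTR, retrying for up to a second.
 */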
   14001 static int
   14002 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14003 {
   14004 	uint32_t ext_ctrl;
   14005 	int timeout = 200;
   14006 
   14007 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14008 		device_xname(sc->sc_dev), __func__));
   14009 
   14010 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14011 	for (timeout = 0; timeout < 200; timeout++) {
   14012 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14013 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14014 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14015 
   14016 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14017 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14018 			return 0;
   14019 		delay(5000);
   14020 	}
   14021 	device_printf(sc->sc_dev,
   14022 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14023 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14024 	return 1;
   14025 }
   14026 
   14027 static void
   14028 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14029 {
   14030 	uint32_t ext_ctrl;
   14031 
   14032 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14033 		device_xname(sc->sc_dev), __func__));
   14034 
   14035 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14036 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14037 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14038 
   14039 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14040 }
   14041 
   14042 static int
   14043 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14044 {
   14045 	uint32_t ext_ctrl;
   14046 	int timeout;
   14047 
   14048 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14049 		device_xname(sc->sc_dev), __func__));
   14050 	mutex_enter(sc->sc_ich_phymtx);
   14051 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14052 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14053 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14054 			break;
   14055 		delay(1000);
   14056 	}
   14057 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14058 		device_printf(sc->sc_dev,
   14059 		    "SW has already locked the resource\n");
   14060 		goto out;
   14061 	}
   14062 
   14063 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14064 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14065 	for (timeout = 0; timeout < 1000; timeout++) {
   14066 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14067 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14068 			break;
   14069 		delay(1000);
   14070 	}
   14071 	if (timeout >= 1000) {
   14072 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14073 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14074 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14075 		goto out;
   14076 	}
   14077 	return 0;
   14078 
   14079 out:
   14080 	mutex_exit(sc->sc_ich_phymtx);
   14081 	return 1;
   14082 }
   14083 
   14084 static void
   14085 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14086 {
   14087 	uint32_t ext_ctrl;
   14088 
   14089 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14090 		device_xname(sc->sc_dev), __func__));
   14091 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14092 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14093 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14094 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14095 	} else {
   14096 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14097 	}
   14098 
   14099 	mutex_exit(sc->sc_ich_phymtx);
   14100 }
   14101 
   14102 static int
   14103 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14104 {
   14105 
   14106 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14107 		device_xname(sc->sc_dev), __func__));
   14108 	mutex_enter(sc->sc_ich_nvmmtx);
   14109 
   14110 	return 0;
   14111 }
   14112 
   14113 static void
   14114 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14115 {
   14116 
   14117 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14118 		device_xname(sc->sc_dev), __func__));
   14119 	mutex_exit(sc->sc_ich_nvmmtx);
   14120 }
   14121 
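/*
 * Get the 82573-family hardware semaphore, which is also based on MDIO
 * software ownership in EXTCNFCTR.
 */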
   14122 static int
   14123 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14124 {
   14125 	int i = 0;
   14126 	uint32_t reg;
   14127 
   14128 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14129 		device_xname(sc->sc_dev), __func__));
   14130 
   14131 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14132 	do {
   14133 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14134 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14135 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14136 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14137 			break;
   14138 		delay(2*1000);
   14139 		i++;
   14140 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14141 
   14142 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14143 		wm_put_hw_semaphore_82573(sc);
   14144 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14145 		    device_xname(sc->sc_dev));
   14146 		return -1;
   14147 	}
   14148 
   14149 	return 0;
   14150 }
   14151 
   14152 static void
   14153 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14154 {
   14155 	uint32_t reg;
   14156 
   14157 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14158 		device_xname(sc->sc_dev), __func__));
   14159 
   14160 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14161 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14162 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14163 }
   14164 
   14165 /*
   14166  * Management mode and power management related subroutines.
   14167  * BMC, AMT, suspend/resume and EEE.
   14168  */
   14169 
   14170 #ifdef WM_WOL
   14171 static int
   14172 wm_check_mng_mode(struct wm_softc *sc)
   14173 {
   14174 	int rv;
   14175 
   14176 	switch (sc->sc_type) {
   14177 	case WM_T_ICH8:
   14178 	case WM_T_ICH9:
   14179 	case WM_T_ICH10:
   14180 	case WM_T_PCH:
   14181 	case WM_T_PCH2:
   14182 	case WM_T_PCH_LPT:
   14183 	case WM_T_PCH_SPT:
   14184 	case WM_T_PCH_CNP:
   14185 		rv = wm_check_mng_mode_ich8lan(sc);
   14186 		break;
   14187 	case WM_T_82574:
   14188 	case WM_T_82583:
   14189 		rv = wm_check_mng_mode_82574(sc);
   14190 		break;
   14191 	case WM_T_82571:
   14192 	case WM_T_82572:
   14193 	case WM_T_82573:
   14194 	case WM_T_80003:
   14195 		rv = wm_check_mng_mode_generic(sc);
   14196 		break;
   14197 	default:
		/* Nothing to do */
   14199 		rv = 0;
   14200 		break;
   14201 	}
   14202 
   14203 	return rv;
   14204 }
   14205 
   14206 static int
   14207 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14208 {
   14209 	uint32_t fwsm;
   14210 
   14211 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14212 
   14213 	if (((fwsm & FWSM_FW_VALID) != 0)
   14214 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14215 		return 1;
   14216 
   14217 	return 0;
   14218 }
   14219 
   14220 static int
   14221 wm_check_mng_mode_82574(struct wm_softc *sc)
   14222 {
   14223 	uint16_t data;
   14224 
   14225 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14226 
   14227 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14228 		return 1;
   14229 
   14230 	return 0;
   14231 }
   14232 
   14233 static int
   14234 wm_check_mng_mode_generic(struct wm_softc *sc)
   14235 {
   14236 	uint32_t fwsm;
   14237 
   14238 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14239 
   14240 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14241 		return 1;
   14242 
   14243 	return 0;
   14244 }
   14245 #endif /* WM_WOL */
   14246 
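/*
 * Decide whether management packets (BMC/AMT) should be passed through
 * to the host.  A nonzero return causes WM_F_HAS_MANAGE to be set in
 * wm_get_wakeup().
 */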
   14247 static int
   14248 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14249 {
   14250 	uint32_t manc, fwsm, factps;
   14251 
   14252 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14253 		return 0;
   14254 
   14255 	manc = CSR_READ(sc, WMREG_MANC);
   14256 
   14257 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14258 		device_xname(sc->sc_dev), manc));
   14259 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14260 		return 0;
   14261 
   14262 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14263 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14264 		factps = CSR_READ(sc, WMREG_FACTPS);
   14265 		if (((factps & FACTPS_MNGCG) == 0)
   14266 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14267 			return 1;
   14268 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14269 		uint16_t data;
   14270 
   14271 		factps = CSR_READ(sc, WMREG_FACTPS);
   14272 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14273 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14274 			device_xname(sc->sc_dev), factps, data));
   14275 		if (((factps & FACTPS_MNGCG) == 0)
   14276 		    && ((data & NVM_CFG2_MNGM_MASK)
   14277 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14278 			return 1;
   14279 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14280 	    && ((manc & MANC_ASF_EN) == 0))
   14281 		return 1;
   14282 
   14283 	return 0;
   14284 }
   14285 
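/*
 * Check whether firmware currently blocks PHY resets (e.g. while the
 * management engine is using the PHY).
 */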
   14286 static bool
   14287 wm_phy_resetisblocked(struct wm_softc *sc)
   14288 {
   14289 	bool blocked = false;
   14290 	uint32_t reg;
   14291 	int i = 0;
   14292 
   14293 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14294 		device_xname(sc->sc_dev), __func__));
   14295 
   14296 	switch (sc->sc_type) {
   14297 	case WM_T_ICH8:
   14298 	case WM_T_ICH9:
   14299 	case WM_T_ICH10:
   14300 	case WM_T_PCH:
   14301 	case WM_T_PCH2:
   14302 	case WM_T_PCH_LPT:
   14303 	case WM_T_PCH_SPT:
   14304 	case WM_T_PCH_CNP:
   14305 		do {
   14306 			reg = CSR_READ(sc, WMREG_FWSM);
   14307 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14308 				blocked = true;
   14309 				delay(10*1000);
   14310 				continue;
   14311 			}
   14312 			blocked = false;
   14313 		} while (blocked && (i++ < 30));
		return blocked;
   14316 	case WM_T_82571:
   14317 	case WM_T_82572:
   14318 	case WM_T_82573:
   14319 	case WM_T_82574:
   14320 	case WM_T_82583:
   14321 	case WM_T_80003:
   14322 		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   14328 	default:
   14329 		/* No problem */
   14330 		break;
   14331 	}
   14332 
   14333 	return false;
   14334 }
   14335 
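/*
 * Let the firmware know that the driver has taken over the device: set
 * the DRV_LOAD bit (in SWSM or CTRL_EXT, depending on the chip).
 * wm_release_hw_control() undoes this.
 */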
   14336 static void
   14337 wm_get_hw_control(struct wm_softc *sc)
   14338 {
   14339 	uint32_t reg;
   14340 
   14341 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14342 		device_xname(sc->sc_dev), __func__));
   14343 
   14344 	if (sc->sc_type == WM_T_82573) {
   14345 		reg = CSR_READ(sc, WMREG_SWSM);
   14346 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14347 	} else if (sc->sc_type >= WM_T_82571) {
   14348 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14349 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14350 	}
   14351 }
   14352 
   14353 static void
   14354 wm_release_hw_control(struct wm_softc *sc)
   14355 {
   14356 	uint32_t reg;
   14357 
   14358 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14359 		device_xname(sc->sc_dev), __func__));
   14360 
   14361 	if (sc->sc_type == WM_T_82573) {
   14362 		reg = CSR_READ(sc, WMREG_SWSM);
   14363 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14364 	} else if (sc->sc_type >= WM_T_82571) {
   14365 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14366 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14367 	}
   14368 }
   14369 
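/*
 * Gate or ungate automatic PHY configuration by hardware (PCH2, i.e.
 * 82579, and newer) while the driver performs its own PHY setup.
 */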
   14370 static void
   14371 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14372 {
   14373 	uint32_t reg;
   14374 
   14375 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14376 		device_xname(sc->sc_dev), __func__));
   14377 
   14378 	if (sc->sc_type < WM_T_PCH2)
   14379 		return;
   14380 
   14381 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14382 
   14383 	if (gate)
   14384 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14385 	else
   14386 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14387 
   14388 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14389 }
   14390 
   14391 static int
   14392 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14393 {
   14394 	uint32_t fwsm, reg;
   14395 	int rv = 0;
   14396 
   14397 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14398 		device_xname(sc->sc_dev), __func__));
   14399 
   14400 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14401 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14402 
   14403 	/* Disable ULP */
   14404 	wm_ulp_disable(sc);
   14405 
   14406 	/* Acquire PHY semaphore */
   14407 	rv = sc->phy.acquire(sc);
   14408 	if (rv != 0) {
   14409 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14410 		device_xname(sc->sc_dev), __func__));
   14411 		return -1;
   14412 	}
   14413 
   14414 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14415 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14416 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14417 	 */
   14418 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14419 	switch (sc->sc_type) {
   14420 	case WM_T_PCH_LPT:
   14421 	case WM_T_PCH_SPT:
   14422 	case WM_T_PCH_CNP:
   14423 		if (wm_phy_is_accessible_pchlan(sc))
   14424 			break;
   14425 
   14426 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14427 		 * forcing MAC to SMBus mode first.
   14428 		 */
   14429 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14430 		reg |= CTRL_EXT_FORCE_SMBUS;
   14431 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14432 #if 0
   14433 		/* XXX Isn't this required??? */
   14434 		CSR_WRITE_FLUSH(sc);
   14435 #endif
   14436 		/* Wait 50 milliseconds for MAC to finish any retries
   14437 		 * that it might be trying to perform from previous
   14438 		 * attempts to acknowledge any phy read requests.
   14439 		 */
   14440 		delay(50 * 1000);
   14441 		/* FALLTHROUGH */
   14442 	case WM_T_PCH2:
   14443 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14444 			break;
   14445 		/* FALLTHROUGH */
   14446 	case WM_T_PCH:
   14447 		if (sc->sc_type == WM_T_PCH)
   14448 			if ((fwsm & FWSM_FW_VALID) != 0)
   14449 				break;
   14450 
   14451 		if (wm_phy_resetisblocked(sc) == true) {
   14452 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14453 			break;
   14454 		}
   14455 
   14456 		/* Toggle LANPHYPC Value bit */
   14457 		wm_toggle_lanphypc_pch_lpt(sc);
   14458 
   14459 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14460 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14461 				break;
   14462 
   14463 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14464 			 * so ensure that the MAC is also out of SMBus mode
   14465 			 */
   14466 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14467 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14468 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14469 
   14470 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14471 				break;
   14472 			rv = -1;
   14473 		}
   14474 		break;
   14475 	default:
   14476 		break;
   14477 	}
   14478 
   14479 	/* Release semaphore */
   14480 	sc->phy.release(sc);
   14481 
   14482 	if (rv == 0) {
   14483 		/* Check to see if able to reset PHY.  Print error if not */
   14484 		if (wm_phy_resetisblocked(sc)) {
   14485 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14486 			goto out;
   14487 		}
   14488 
   14489 		/* Reset the PHY before any access to it.  Doing so, ensures
   14490 		 * that the PHY is in a known good state before we read/write
   14491 		 * PHY registers.  The generic reset is sufficient here,
   14492 		 * because we haven't determined the PHY type yet.
   14493 		 */
   14494 		if (wm_reset_phy(sc) != 0)
   14495 			goto out;
   14496 
   14497 		/* On a successful reset, possibly need to wait for the PHY
   14498 		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce,
		 * report that reset is blocked, as this is the condition
		 * the PHY is in.
   14502 		 */
   14503 		if (wm_phy_resetisblocked(sc))
   14504 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14505 	}
   14506 
   14507 out:
   14508 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14509 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14510 		delay(10*1000);
   14511 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14512 	}
   14513 
	return rv;
   14515 }
   14516 
   14517 static void
   14518 wm_init_manageability(struct wm_softc *sc)
   14519 {
   14520 
   14521 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14522 		device_xname(sc->sc_dev), __func__));
   14523 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14524 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14525 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14526 
   14527 		/* Disable hardware interception of ARP */
   14528 		manc &= ~MANC_ARP_EN;
   14529 
   14530 		/* Enable receiving management packets to the host */
   14531 		if (sc->sc_type >= WM_T_82571) {
   14532 			manc |= MANC_EN_MNG2HOST;
   14533 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14534 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14535 		}
   14536 
   14537 		CSR_WRITE(sc, WMREG_MANC, manc);
   14538 	}
   14539 }
   14540 
   14541 static void
   14542 wm_release_manageability(struct wm_softc *sc)
   14543 {
   14544 
   14545 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14546 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14547 
   14548 		manc |= MANC_ARP_EN;
   14549 		if (sc->sc_type >= WM_T_82571)
   14550 			manc &= ~MANC_EN_MNG2HOST;
   14551 
   14552 		CSR_WRITE(sc, WMREG_MANC, manc);
   14553 	}
   14554 }
   14555 
   14556 static void
   14557 wm_get_wakeup(struct wm_softc *sc)
   14558 {
   14559 
   14560 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14561 	switch (sc->sc_type) {
   14562 	case WM_T_82573:
   14563 	case WM_T_82583:
   14564 		sc->sc_flags |= WM_F_HAS_AMT;
   14565 		/* FALLTHROUGH */
   14566 	case WM_T_80003:
   14567 	case WM_T_82575:
   14568 	case WM_T_82576:
   14569 	case WM_T_82580:
   14570 	case WM_T_I350:
   14571 	case WM_T_I354:
   14572 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14573 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14574 		/* FALLTHROUGH */
   14575 	case WM_T_82541:
   14576 	case WM_T_82541_2:
   14577 	case WM_T_82547:
   14578 	case WM_T_82547_2:
   14579 	case WM_T_82571:
   14580 	case WM_T_82572:
   14581 	case WM_T_82574:
   14582 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14583 		break;
   14584 	case WM_T_ICH8:
   14585 	case WM_T_ICH9:
   14586 	case WM_T_ICH10:
   14587 	case WM_T_PCH:
   14588 	case WM_T_PCH2:
   14589 	case WM_T_PCH_LPT:
   14590 	case WM_T_PCH_SPT:
   14591 	case WM_T_PCH_CNP:
   14592 		sc->sc_flags |= WM_F_HAS_AMT;
   14593 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14594 		break;
   14595 	default:
   14596 		break;
   14597 	}
   14598 
   14599 	/* 1: HAS_MANAGE */
   14600 	if (wm_enable_mng_pass_thru(sc) != 0)
   14601 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14602 
   14603 	/*
	 * Note that the WOL flags are set after the resetting of the
	 * EEPROM stuff.
   14606 	 */
   14607 }
   14608 
   14609 /*
   14610  * Unconfigure Ultra Low Power mode.
   14611  * Only for I217 and newer (see below).
   14612  */
   14613 static int
   14614 wm_ulp_disable(struct wm_softc *sc)
   14615 {
   14616 	uint32_t reg;
   14617 	uint16_t phyreg;
   14618 	int i = 0, rv = 0;
   14619 
   14620 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14621 		device_xname(sc->sc_dev), __func__));
   14622 	/* Exclude old devices */
   14623 	if ((sc->sc_type < WM_T_PCH_LPT)
   14624 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14625 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14626 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14627 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14628 		return 0;
   14629 
   14630 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14631 		/* Request ME un-configure ULP mode in the PHY */
   14632 		reg = CSR_READ(sc, WMREG_H2ME);
   14633 		reg &= ~H2ME_ULP;
   14634 		reg |= H2ME_ENFORCE_SETTINGS;
   14635 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14636 
   14637 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14638 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14639 			if (i++ == 30) {
   14640 				device_printf(sc->sc_dev, "%s timed out\n",
   14641 				    __func__);
   14642 				return -1;
   14643 			}
   14644 			delay(10 * 1000);
   14645 		}
   14646 		reg = CSR_READ(sc, WMREG_H2ME);
   14647 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14648 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14649 
   14650 		return 0;
   14651 	}
   14652 
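          	/*
          	 * No manageability firmware is present, so software has to exit
          	 * ULP by hand: power-cycle the PHY via LANPHYPC, take the PHY
          	 * and the MAC out of forced-SMBus mode, re-enable K1, clear the
          	 * sticky ULP configuration bits and commit the change with
          	 * I218_ULP_CONFIG1_START.
          	 */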
   14653 	/* Acquire semaphore */
   14654 	rv = sc->phy.acquire(sc);
   14655 	if (rv != 0) {
   14656 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14657 		device_xname(sc->sc_dev), __func__));
   14658 		return -1;
   14659 	}
   14660 
   14661 	/* Toggle LANPHYPC */
   14662 	wm_toggle_lanphypc_pch_lpt(sc);
   14663 
   14664 	/* Unforce SMBus mode in PHY */
   14665 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14666 	if (rv != 0) {
   14667 		uint32_t reg2;
   14668 
   14669 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14670 			__func__);
   14671 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14672 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14673 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14674 		delay(50 * 1000);
   14675 
   14676 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14677 		    &phyreg);
   14678 		if (rv != 0)
   14679 			goto release;
   14680 	}
   14681 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14682 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14683 
   14684 	/* Unforce SMBus mode in MAC */
   14685 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14686 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14687 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14688 
   14689 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14690 	if (rv != 0)
   14691 		goto release;
   14692 	phyreg |= HV_PM_CTRL_K1_ENA;
   14693 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14694 
   14695 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14696 		&phyreg);
   14697 	if (rv != 0)
   14698 		goto release;
   14699 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14700 	    | I218_ULP_CONFIG1_STICKY_ULP
   14701 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14702 	    | I218_ULP_CONFIG1_WOL_HOST
   14703 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14704 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14705 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14706 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14707 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14708 	phyreg |= I218_ULP_CONFIG1_START;
   14709 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14710 
   14711 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14712 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14713 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14714 
   14715 release:
   14716 	/* Release semaphore */
   14717 	sc->phy.release(sc);
   14718 	wm_gmii_reset(sc);
   14719 	delay(50 * 1000);
   14720 
   14721 	return rv;
   14722 }
   14723 
   14724 /* WOL in the newer chipset interfaces (pchlan) */
   14725 static int
   14726 wm_enable_phy_wakeup(struct wm_softc *sc)
   14727 {
   14728 	device_t dev = sc->sc_dev;
   14729 	uint32_t mreg, moff;
   14730 	uint16_t wuce, wuc, wufc, preg;
   14731 	int i, rv;
   14732 
   14733 	KASSERT(sc->sc_type >= WM_T_PCH);
   14734 
   14735 	/* Copy MAC RARs to PHY RARs */
   14736 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14737 
   14738 	/* Activate PHY wakeup */
   14739 	rv = sc->phy.acquire(sc);
   14740 	if (rv != 0) {
   14741 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14742 		    __func__);
   14743 		return rv;
   14744 	}
   14745 
   14746 	/*
   14747 	 * Enable access to PHY wakeup registers.
   14748 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14749 	 */
   14750 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14751 	if (rv != 0) {
   14752 		device_printf(dev,
   14753 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14754 		goto release;
   14755 	}
   14756 
   14757 	/* Copy MAC MTA to PHY MTA */
   14758 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14759 		uint16_t lo, hi;
   14760 
   14761 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14762 		lo = (uint16_t)(mreg & 0xffff);
   14763 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14764 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14765 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14766 	}
   14767 
   14768 	/* Configure PHY Rx Control register */
   14769 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14770 	mreg = CSR_READ(sc, WMREG_RCTL);
   14771 	if (mreg & RCTL_UPE)
   14772 		preg |= BM_RCTL_UPE;
   14773 	if (mreg & RCTL_MPE)
   14774 		preg |= BM_RCTL_MPE;
   14775 	preg &= ~(BM_RCTL_MO_MASK);
   14776 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14777 	if (moff != 0)
   14778 		preg |= moff << BM_RCTL_MO_SHIFT;
   14779 	if (mreg & RCTL_BAM)
   14780 		preg |= BM_RCTL_BAM;
   14781 	if (mreg & RCTL_PMCF)
   14782 		preg |= BM_RCTL_PMCF;
   14783 	mreg = CSR_READ(sc, WMREG_CTRL);
   14784 	if (mreg & CTRL_RFCE)
   14785 		preg |= BM_RCTL_RFCE;
   14786 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14787 
   14788 	wuc = WUC_APME | WUC_PME_EN;
   14789 	wufc = WUFC_MAG;
   14790 	/* Enable PHY wakeup in MAC register */
   14791 	CSR_WRITE(sc, WMREG_WUC,
   14792 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14793 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14794 
   14795 	/* Configure and enable PHY wakeup in PHY registers */
   14796 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14797 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14798 
   14799 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14800 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14801 
   14802 release:
   14803 	sc->phy.release(sc);
   14804 
    14805 	return rv;
   14806 }
   14807 
   14808 /* Power down workaround on D3 */
   14809 static void
   14810 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14811 {
   14812 	uint32_t reg;
   14813 	uint16_t phyreg;
   14814 	int i;
   14815 
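          	/*
          	 * Try at most twice: if the VR shutdown bit does not read back
          	 * after the first pass, reset the PHY and repeat once.
          	 */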
   14816 	for (i = 0; i < 2; i++) {
   14817 		/* Disable link */
   14818 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14819 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14820 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14821 
   14822 		/*
   14823 		 * Call gig speed drop workaround on Gig disable before
   14824 		 * accessing any PHY registers
   14825 		 */
   14826 		if (sc->sc_type == WM_T_ICH8)
   14827 			wm_gig_downshift_workaround_ich8lan(sc);
   14828 
   14829 		/* Write VR power-down enable */
   14830 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14831 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14832 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14833 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14834 
   14835 		/* Read it back and test */
   14836 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14837 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14838 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14839 			break;
   14840 
   14841 		/* Issue PHY reset and repeat at most one more time */
   14842 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14843 	}
   14844 }
   14845 
   14846 /*
   14847  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14848  *  @sc: pointer to the HW structure
   14849  *
   14850  *  During S0 to Sx transition, it is possible the link remains at gig
   14851  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14852  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14853  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14854  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14855  *  needs to be written.
    14856  *  Parts that support (and are linked to a partner which supports) EEE in
   14857  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14858  *  than 10Mbps w/o EEE.
   14859  */
   14860 static void
   14861 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14862 {
   14863 	device_t dev = sc->sc_dev;
   14864 	struct ethercom *ec = &sc->sc_ethercom;
   14865 	uint32_t phy_ctrl;
   14866 	int rv;
   14867 
   14868 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14869 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14870 
   14871 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14872 
   14873 	if (sc->sc_phytype == WMPHY_I217) {
   14874 		uint16_t devid = sc->sc_pcidevid;
   14875 
   14876 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14877 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14878 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14879 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14880 		    (sc->sc_type >= WM_T_PCH_SPT))
   14881 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14882 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14883 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14884 
   14885 		if (sc->phy.acquire(sc) != 0)
   14886 			goto out;
   14887 
   14888 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14889 			uint16_t eee_advert;
   14890 
   14891 			rv = wm_read_emi_reg_locked(dev,
   14892 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14893 			if (rv)
   14894 				goto release;
   14895 
   14896 			/*
   14897 			 * Disable LPLU if both link partners support 100BaseT
   14898 			 * EEE and 100Full is advertised on both ends of the
   14899 			 * link, and enable Auto Enable LPI since there will
   14900 			 * be no driver to enable LPI while in Sx.
   14901 			 */
   14902 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14903 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14904 				uint16_t anar, phy_reg;
   14905 
   14906 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14907 				    &anar);
   14908 				if (anar & ANAR_TX_FD) {
   14909 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14910 					    PHY_CTRL_NOND0A_LPLU);
   14911 
   14912 					/* Set Auto Enable LPI after link up */
   14913 					sc->phy.readreg_locked(dev, 2,
   14914 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14915 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14916 					sc->phy.writereg_locked(dev, 2,
   14917 					    I217_LPI_GPIO_CTRL, phy_reg);
   14918 				}
   14919 			}
   14920 		}
   14921 
   14922 		/*
   14923 		 * For i217 Intel Rapid Start Technology support,
   14924 		 * when the system is going into Sx and no manageability engine
   14925 		 * is present, the driver must configure proxy to reset only on
   14926 		 * power good.	LPI (Low Power Idle) state must also reset only
   14927 		 * on power good, as well as the MTA (Multicast table array).
   14928 		 * The SMBus release must also be disabled on LCD reset.
   14929 		 */
   14930 
   14931 		/*
   14932 		 * Enable MTA to reset for Intel Rapid Start Technology
   14933 		 * Support
   14934 		 */
   14935 
   14936 release:
   14937 		sc->phy.release(sc);
   14938 	}
   14939 out:
   14940 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14941 
   14942 	if (sc->sc_type == WM_T_ICH8)
   14943 		wm_gig_downshift_workaround_ich8lan(sc);
   14944 
   14945 	if (sc->sc_type >= WM_T_PCH) {
   14946 		wm_oem_bits_config_ich8lan(sc, false);
   14947 
   14948 		/* Reset PHY to activate OEM bits on 82577/8 */
   14949 		if (sc->sc_type == WM_T_PCH)
   14950 			wm_reset_phy(sc);
   14951 
   14952 		if (sc->phy.acquire(sc) != 0)
   14953 			return;
   14954 		wm_write_smbus_addr(sc);
   14955 		sc->phy.release(sc);
   14956 	}
   14957 }
   14958 
   14959 /*
   14960  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14961  *  @sc: pointer to the HW structure
   14962  *
   14963  *  During Sx to S0 transitions on non-managed devices or managed devices
   14964  *  on which PHY resets are not blocked, if the PHY registers cannot be
    14965  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   14966  *  the PHY.
   14967  *  On i217, setup Intel Rapid Start Technology.
   14968  */
   14969 static int
   14970 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14971 {
   14972 	device_t dev = sc->sc_dev;
   14973 	int rv;
   14974 
   14975 	if (sc->sc_type < WM_T_PCH2)
   14976 		return 0;
   14977 
   14978 	rv = wm_init_phy_workarounds_pchlan(sc);
   14979 	if (rv != 0)
   14980 		return -1;
   14981 
   14982 	/* For i217 Intel Rapid Start Technology support when the system
   14983 	 * is transitioning from Sx and no manageability engine is present
   14984 	 * configure SMBus to restore on reset, disable proxy, and enable
   14985 	 * the reset on MTA (Multicast table array).
   14986 	 */
   14987 	if (sc->sc_phytype == WMPHY_I217) {
   14988 		uint16_t phy_reg;
   14989 
   14990 		if (sc->phy.acquire(sc) != 0)
   14991 			return -1;
   14992 
   14993 		/* Clear Auto Enable LPI after link up */
   14994 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14995 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14996 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14997 
   14998 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14999 			/* Restore clear on SMB if no manageability engine
   15000 			 * is present
   15001 			 */
   15002 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15003 			    &phy_reg);
   15004 			if (rv != 0)
   15005 				goto release;
   15006 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15007 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15008 
   15009 			/* Disable Proxy */
   15010 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15011 		}
   15012 		/* Enable reset on MTA */
    15013 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15014 		if (rv != 0)
   15015 			goto release;
   15016 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15017 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15018 
   15019 release:
   15020 		sc->phy.release(sc);
   15021 		return rv;
   15022 	}
   15023 
   15024 	return 0;
   15025 }
   15026 
   15027 static void
   15028 wm_enable_wakeup(struct wm_softc *sc)
   15029 {
   15030 	uint32_t reg, pmreg;
   15031 	pcireg_t pmode;
   15032 	int rv = 0;
   15033 
   15034 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15035 		device_xname(sc->sc_dev), __func__));
   15036 
   15037 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15038 	    &pmreg, NULL) == 0)
   15039 		return;
   15040 
   15041 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15042 		goto pme;
   15043 
   15044 	/* Advertise the wakeup capability */
   15045 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15046 	    | CTRL_SWDPIN(3));
   15047 
   15048 	/* Keep the laser running on fiber adapters */
   15049 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15050 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15051 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15052 		reg |= CTRL_EXT_SWDPIN(3);
   15053 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15054 	}
   15055 
   15056 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15057 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15058 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15059 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15060 		wm_suspend_workarounds_ich8lan(sc);
   15061 
   15062 #if 0	/* For the multicast packet */
   15063 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15064 	reg |= WUFC_MC;
   15065 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15066 #endif
   15067 
   15068 	if (sc->sc_type >= WM_T_PCH) {
   15069 		rv = wm_enable_phy_wakeup(sc);
   15070 		if (rv != 0)
   15071 			goto pme;
   15072 	} else {
   15073 		/* Enable wakeup by the MAC */
   15074 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15075 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15076 	}
   15077 
   15078 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15079 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15080 		|| (sc->sc_type == WM_T_PCH2))
   15081 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15082 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15083 
   15084 pme:
   15085 	/* Request PME */
   15086 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15087 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15088 		/* For WOL */
   15089 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15090 	} else {
   15091 		/* Disable WOL */
   15092 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15093 	}
   15094 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15095 }
   15096 
   15097 /* Disable ASPM L0s and/or L1 for workaround */
   15098 static void
   15099 wm_disable_aspm(struct wm_softc *sc)
   15100 {
   15101 	pcireg_t reg, mask = 0;
    15102 	const char *str = "";
   15103 
   15104 	/*
    15105 	 * Only for PCIe devices that have the PCIe capability structure
    15106 	 * in the PCI config space.
   15107 	 */
   15108 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15109 		return;
   15110 
   15111 	switch (sc->sc_type) {
   15112 	case WM_T_82571:
   15113 	case WM_T_82572:
   15114 		/*
   15115 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15116 		 * State Power management L1 State (ASPM L1).
   15117 		 */
   15118 		mask = PCIE_LCSR_ASPM_L1;
   15119 		str = "L1 is";
   15120 		break;
   15121 	case WM_T_82573:
   15122 	case WM_T_82574:
   15123 	case WM_T_82583:
   15124 		/*
   15125 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15126 		 *
    15127 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15128 		 * some chipsets.  The 82574 and 82583 documents say that
    15129 		 * disabling L0s on those specific chipsets is sufficient,
    15130 		 * but we follow what the Intel em driver does.
   15131 		 *
   15132 		 * References:
   15133 		 * Errata 8 of the Specification Update of i82573.
   15134 		 * Errata 20 of the Specification Update of i82574.
   15135 		 * Errata 9 of the Specification Update of i82583.
   15136 		 */
   15137 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15138 		str = "L0s and L1 are";
   15139 		break;
   15140 	default:
   15141 		return;
   15142 	}
   15143 
   15144 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15145 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15146 	reg &= ~mask;
   15147 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15148 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15149 
   15150 	/* Print only in wm_attach() */
   15151 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15152 		aprint_verbose_dev(sc->sc_dev,
   15153 		    "ASPM %s disabled to workaround the errata.\n", str);
   15154 }
   15155 
   15156 /* LPLU */
   15157 
   15158 static void
   15159 wm_lplu_d0_disable(struct wm_softc *sc)
   15160 {
   15161 	struct mii_data *mii = &sc->sc_mii;
   15162 	uint32_t reg;
   15163 	uint16_t phyval;
   15164 
   15165 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15166 		device_xname(sc->sc_dev), __func__));
   15167 
   15168 	if (sc->sc_phytype == WMPHY_IFE)
   15169 		return;
   15170 
   15171 	switch (sc->sc_type) {
   15172 	case WM_T_82571:
   15173 	case WM_T_82572:
   15174 	case WM_T_82573:
   15175 	case WM_T_82575:
   15176 	case WM_T_82576:
   15177 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15178 		phyval &= ~PMR_D0_LPLU;
   15179 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15180 		break;
   15181 	case WM_T_82580:
   15182 	case WM_T_I350:
   15183 	case WM_T_I210:
   15184 	case WM_T_I211:
   15185 		reg = CSR_READ(sc, WMREG_PHPM);
   15186 		reg &= ~PHPM_D0A_LPLU;
   15187 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15188 		break;
   15189 	case WM_T_82574:
   15190 	case WM_T_82583:
   15191 	case WM_T_ICH8:
   15192 	case WM_T_ICH9:
   15193 	case WM_T_ICH10:
   15194 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15195 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15196 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15197 		CSR_WRITE_FLUSH(sc);
   15198 		break;
   15199 	case WM_T_PCH:
   15200 	case WM_T_PCH2:
   15201 	case WM_T_PCH_LPT:
   15202 	case WM_T_PCH_SPT:
   15203 	case WM_T_PCH_CNP:
   15204 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15205 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15206 		if (wm_phy_resetisblocked(sc) == false)
   15207 			phyval |= HV_OEM_BITS_ANEGNOW;
   15208 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15209 		break;
   15210 	default:
   15211 		break;
   15212 	}
   15213 }
   15214 
   15215 /* EEE */
   15216 
   15217 static int
   15218 wm_set_eee_i350(struct wm_softc *sc)
   15219 {
   15220 	struct ethercom *ec = &sc->sc_ethercom;
   15221 	uint32_t ipcnfg, eeer;
   15222 	uint32_t ipcnfg_mask
   15223 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15224 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15225 
   15226 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15227 
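          	/*
          	 * Judging by the bit names, IPCNFG controls the speeds for
          	 * which EEE is advertised during autonegotiation, while EEER
          	 * enables the Tx/Rx LPI state machines and LPI flow control.
          	 * Both are toggled together below.
          	 */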
   15228 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15229 	eeer = CSR_READ(sc, WMREG_EEER);
   15230 
   15231 	/* Enable or disable per user setting */
   15232 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15233 		ipcnfg |= ipcnfg_mask;
   15234 		eeer |= eeer_mask;
   15235 	} else {
   15236 		ipcnfg &= ~ipcnfg_mask;
   15237 		eeer &= ~eeer_mask;
   15238 	}
   15239 
   15240 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15241 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15242 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15243 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15244 
   15245 	return 0;
   15246 }
   15247 
   15248 static int
   15249 wm_set_eee_pchlan(struct wm_softc *sc)
   15250 {
   15251 	device_t dev = sc->sc_dev;
   15252 	struct ethercom *ec = &sc->sc_ethercom;
   15253 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15254 	int rv = 0;
   15255 
   15256 	switch (sc->sc_phytype) {
   15257 	case WMPHY_82579:
   15258 		lpa = I82579_EEE_LP_ABILITY;
   15259 		pcs_status = I82579_EEE_PCS_STATUS;
   15260 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15261 		break;
   15262 	case WMPHY_I217:
   15263 		lpa = I217_EEE_LP_ABILITY;
   15264 		pcs_status = I217_EEE_PCS_STATUS;
   15265 		adv_addr = I217_EEE_ADVERTISEMENT;
   15266 		break;
   15267 	default:
   15268 		return 0;
   15269 	}
   15270 
   15271 	if (sc->phy.acquire(sc)) {
   15272 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15273 		return 0;
   15274 	}
   15275 
   15276 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15277 	if (rv != 0)
   15278 		goto release;
   15279 
   15280 	/* Clear bits that enable EEE in various speeds */
   15281 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15282 
   15283 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15284 		/* Save off link partner's EEE ability */
   15285 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15286 		if (rv != 0)
   15287 			goto release;
   15288 
   15289 		/* Read EEE advertisement */
   15290 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15291 			goto release;
   15292 
   15293 		/*
   15294 		 * Enable EEE only for speeds in which the link partner is
   15295 		 * EEE capable and for which we advertise EEE.
   15296 		 */
   15297 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15298 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15299 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15300 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15301 			if ((data & ANLPAR_TX_FD) != 0)
   15302 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15303 			else {
   15304 				/*
   15305 				 * EEE is not supported in 100Half, so ignore
   15306 				 * partner's EEE in 100 ability if full-duplex
   15307 				 * is not advertised.
   15308 				 */
   15309 				sc->eee_lp_ability
   15310 				    &= ~AN_EEEADVERT_100_TX;
   15311 			}
   15312 		}
   15313 	}
   15314 
   15315 	if (sc->sc_phytype == WMPHY_82579) {
   15316 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15317 		if (rv != 0)
   15318 			goto release;
   15319 
   15320 		data &= ~I82579_LPI_PLL_SHUT_100;
   15321 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15322 	}
   15323 
   15324 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15325 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15326 		goto release;
   15327 
   15328 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15329 release:
   15330 	sc->phy.release(sc);
   15331 
   15332 	return rv;
   15333 }
   15334 
   15335 static int
   15336 wm_set_eee(struct wm_softc *sc)
   15337 {
   15338 	struct ethercom *ec = &sc->sc_ethercom;
   15339 
   15340 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15341 		return 0;
   15342 
   15343 	if (sc->sc_type == WM_T_I354) {
   15344 		/* I354 uses an external PHY */
   15345 		return 0; /* not yet */
   15346 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15347 		return wm_set_eee_i350(sc);
   15348 	else if (sc->sc_type >= WM_T_PCH2)
   15349 		return wm_set_eee_pchlan(sc);
   15350 
   15351 	return 0;
   15352 }
   15353 
   15354 /*
   15355  * Workarounds (mainly PHY related).
    15356  * Normally, PHY workarounds belong in the PHY drivers.
   15357  */
   15358 
   15359 /* Work-around for 82566 Kumeran PCS lock loss */
   15360 static int
   15361 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15362 {
   15363 	struct mii_data *mii = &sc->sc_mii;
   15364 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15365 	int i, reg, rv;
   15366 	uint16_t phyreg;
   15367 
   15368 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15369 		device_xname(sc->sc_dev), __func__));
   15370 
   15371 	/* If the link is not up, do nothing */
   15372 	if ((status & STATUS_LU) == 0)
   15373 		return 0;
   15374 
   15375 	/* Nothing to do if the link is other than 1Gbps */
   15376 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15377 		return 0;
   15378 
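          	/*
          	 * Retry up to 10 times: read the Kumeran diagnostic register
          	 * twice per pass and reset the PHY for as long as the PCS
          	 * lock-loss bit stays set.  If it never clears, fall through
          	 * and disable gigabit negotiation for good.
          	 */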
   15379 	for (i = 0; i < 10; i++) {
   15380 		/* read twice */
   15381 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15382 		if (rv != 0)
   15383 			return rv;
   15384 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15385 		if (rv != 0)
   15386 			return rv;
   15387 
   15388 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15389 			goto out;	/* GOOD! */
   15390 
   15391 		/* Reset the PHY */
   15392 		wm_reset_phy(sc);
   15393 		delay(5*1000);
   15394 	}
   15395 
   15396 	/* Disable GigE link negotiation */
   15397 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15398 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15399 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15400 
   15401 	/*
   15402 	 * Call gig speed drop workaround on Gig disable before accessing
   15403 	 * any PHY registers.
   15404 	 */
   15405 	wm_gig_downshift_workaround_ich8lan(sc);
   15406 
   15407 out:
   15408 	return 0;
   15409 }
   15410 
   15411 /*
   15412  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15413  *  @sc: pointer to the HW structure
   15414  *
    15415  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15416  *  LPLU, Gig disable, MDIC PHY reset):
   15417  *    1) Set Kumeran Near-end loopback
   15418  *    2) Clear Kumeran Near-end loopback
   15419  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15420  */
   15421 static void
   15422 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15423 {
   15424 	uint16_t kmreg;
   15425 
   15426 	/* Only for igp3 */
   15427 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15428 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15429 			return;
   15430 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15431 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15432 			return;
   15433 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15434 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15435 	}
   15436 }
   15437 
   15438 /*
   15439  * Workaround for pch's PHYs
   15440  * XXX should be moved to new PHY driver?
   15441  */
   15442 static int
   15443 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15444 {
   15445 	device_t dev = sc->sc_dev;
   15446 	struct mii_data *mii = &sc->sc_mii;
   15447 	struct mii_softc *child;
   15448 	uint16_t phy_data, phyrev = 0;
   15449 	int phytype = sc->sc_phytype;
   15450 	int rv;
   15451 
   15452 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15453 		device_xname(dev), __func__));
   15454 	KASSERT(sc->sc_type == WM_T_PCH);
   15455 
   15456 	/* Set MDIO slow mode before any other MDIO access */
   15457 	if (phytype == WMPHY_82577)
   15458 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15459 			return rv;
   15460 
   15461 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15462 	if (child != NULL)
   15463 		phyrev = child->mii_mpd_rev;
   15464 
    15465 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15466 	if ((child != NULL) &&
   15467 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15468 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15469 		/* Disable generation of early preamble (0x4431) */
   15470 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15471 		    &phy_data);
   15472 		if (rv != 0)
   15473 			return rv;
   15474 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15475 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15476 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15477 		    phy_data);
   15478 		if (rv != 0)
   15479 			return rv;
   15480 
   15481 		/* Preamble tuning for SSC */
   15482 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15483 		if (rv != 0)
   15484 			return rv;
   15485 	}
   15486 
   15487 	/* 82578 */
   15488 	if (phytype == WMPHY_82578) {
   15489 		/*
   15490 		 * Return registers to default by doing a soft reset then
   15491 		 * writing 0x3140 to the control register
   15492 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15493 		 */
   15494 		if ((child != NULL) && (phyrev < 2)) {
   15495 			PHY_RESET(child);
   15496 			rv = sc->sc_mii.mii_writereg(dev, 2, MII_BMCR,
   15497 			    0x3140);
   15498 			if (rv != 0)
   15499 				return rv;
   15500 		}
   15501 	}
   15502 
   15503 	/* Select page 0 */
   15504 	if ((rv = sc->phy.acquire(sc)) != 0)
   15505 		return rv;
   15506 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15507 	sc->phy.release(sc);
   15508 	if (rv != 0)
   15509 		return rv;
   15510 
   15511 	/*
   15512 	 * Configure the K1 Si workaround during phy reset assuming there is
   15513 	 * link so that it disables K1 if link is in 1Gbps.
   15514 	 */
   15515 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15516 		return rv;
   15517 
   15518 	/* Workaround for link disconnects on a busy hub in half duplex */
   15519 	rv = sc->phy.acquire(sc);
   15520 	if (rv)
   15521 		return rv;
   15522 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15523 	if (rv)
   15524 		goto release;
   15525 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15526 	    phy_data & 0x00ff);
   15527 	if (rv)
   15528 		goto release;
   15529 
   15530 	/* Set MSE higher to enable link to stay up when noise is high */
   15531 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15532 release:
   15533 	sc->phy.release(sc);
   15534 
   15535 	return rv;
   15536 }
   15537 
   15538 /*
   15539  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15540  *  @sc:   pointer to the HW structure
   15541  */
   15542 static void
   15543 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15544 {
   15545 	device_t dev = sc->sc_dev;
   15546 	uint32_t mac_reg;
   15547 	uint16_t i, wuce;
   15548 	int count;
   15549 
   15550 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15551 		device_xname(sc->sc_dev), __func__));
   15552 
   15553 	if (sc->phy.acquire(sc) != 0)
   15554 		return;
   15555 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15556 		goto release;
   15557 
    15558 	/* Copy both RAL/RAH (up to the RAR count) and SHRAL/SHRAH to the PHY */
   15559 	count = wm_rar_count(sc);
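          	/*
          	 * The PHY wakeup registers are 16 bits wide, so each 32-bit
          	 * RAL/RAH MAC register is split into a low and a high half
          	 * before being written to the matching BM_RAR_* registers.
          	 */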
   15560 	for (i = 0; i < count; i++) {
   15561 		uint16_t lo, hi;
   15562 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15563 		lo = (uint16_t)(mac_reg & 0xffff);
   15564 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15565 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15566 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15567 
   15568 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15569 		lo = (uint16_t)(mac_reg & 0xffff);
   15570 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15571 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15572 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15573 	}
   15574 
   15575 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15576 
   15577 release:
   15578 	sc->phy.release(sc);
   15579 }
   15580 
   15581 /*
   15582  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15583  *  done after every PHY reset.
   15584  */
   15585 static int
   15586 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15587 {
   15588 	device_t dev = sc->sc_dev;
   15589 	int rv;
   15590 
   15591 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15592 		device_xname(dev), __func__));
   15593 	KASSERT(sc->sc_type == WM_T_PCH2);
   15594 
   15595 	/* Set MDIO slow mode before any other MDIO access */
   15596 	rv = wm_set_mdio_slow_mode_hv(sc);
   15597 	if (rv != 0)
   15598 		return rv;
   15599 
   15600 	rv = sc->phy.acquire(sc);
   15601 	if (rv != 0)
   15602 		return rv;
   15603 	/* Set MSE higher to enable link to stay up when noise is high */
   15604 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15605 	if (rv != 0)
   15606 		goto release;
   15607 	/* Drop link after 5 times MSE threshold was reached */
   15608 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15609 release:
   15610 	sc->phy.release(sc);
   15611 
   15612 	return rv;
   15613 }
   15614 
   15615 /**
   15616  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15617  *  @link: link up bool flag
   15618  *
   15619  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15620  *  preventing further DMA write requests.  Workaround the issue by disabling
    15621  *  the de-assertion of the clock request when in 1Gbps mode.
   15622  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15623  *  speeds in order to avoid Tx hangs.
   15624  **/
   15625 static int
   15626 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15627 {
   15628 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15629 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15630 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15631 	uint16_t phyreg;
   15632 
   15633 	if (link && (speed == STATUS_SPEED_1000)) {
    15634 		int rv = sc->phy.acquire(sc);
          		if (rv != 0)
          			return rv;
    15635 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    15636 		    &phyreg);
   15637 		if (rv != 0)
   15638 			goto release;
   15639 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15640 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15641 		if (rv != 0)
   15642 			goto release;
   15643 		delay(20);
   15644 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15645 
   15646 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15647 		    &phyreg);
   15648 release:
   15649 		sc->phy.release(sc);
   15650 		return rv;
   15651 	}
   15652 
   15653 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15654 
   15655 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15656 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15657 	    || !link
   15658 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15659 		goto update_fextnvm6;
   15660 
   15661 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15662 
   15663 	/* Clear link status transmit timeout */
   15664 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15665 	if (speed == STATUS_SPEED_100) {
   15666 		/* Set inband Tx timeout to 5x10us for 100Half */
   15667 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15668 
   15669 		/* Do not extend the K1 entry latency for 100Half */
   15670 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15671 	} else {
   15672 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15673 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15674 
   15675 		/* Extend the K1 entry latency for 10 Mbps */
   15676 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15677 	}
   15678 
   15679 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15680 
   15681 update_fextnvm6:
   15682 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15683 	return 0;
   15684 }
   15685 
   15686 /*
   15687  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15688  *  @sc:   pointer to the HW structure
   15689  *  @link: link up bool flag
   15690  *
   15691  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   15692  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
    15693  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15694  *  in the NVM.
   15695  */
   15696 static int
   15697 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15698 {
   15699 	int k1_enable = sc->sc_nvm_k1_enabled;
   15700 
   15701 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15702 		device_xname(sc->sc_dev), __func__));
   15703 
   15704 	if (sc->phy.acquire(sc) != 0)
   15705 		return -1;
   15706 
   15707 	if (link) {
   15708 		k1_enable = 0;
   15709 
   15710 		/* Link stall fix for link up */
   15711 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15712 		    0x0100);
   15713 	} else {
   15714 		/* Link stall fix for link down */
   15715 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15716 		    0x4100);
   15717 	}
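          	/*
          	 * The 0x0100/0x4100 values written above are undocumented
          	 * magic that matches Intel's reference driver.
          	 */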
   15718 
   15719 	wm_configure_k1_ich8lan(sc, k1_enable);
   15720 	sc->phy.release(sc);
   15721 
   15722 	return 0;
   15723 }
   15724 
   15725 /*
   15726  *  wm_k1_workaround_lv - K1 Si workaround
   15727  *  @sc:   pointer to the HW structure
   15728  *
   15729  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   15730  *  Disable K1 for 1000 and 100 speeds
   15731  */
   15732 static int
   15733 wm_k1_workaround_lv(struct wm_softc *sc)
   15734 {
   15735 	uint32_t reg;
   15736 	uint16_t phyreg;
   15737 	int rv;
   15738 
   15739 	if (sc->sc_type != WM_T_PCH2)
   15740 		return 0;
   15741 
   15742 	/* Set K1 beacon duration based on 10Mbps speed */
   15743 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15744 	if (rv != 0)
   15745 		return rv;
   15746 
   15747 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15748 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15749 		if (phyreg &
   15750 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    15751 			/* LV 1G/100 packet drop issue workaround */
   15752 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15753 			    &phyreg);
   15754 			if (rv != 0)
   15755 				return rv;
   15756 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15757 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15758 			    phyreg);
   15759 			if (rv != 0)
   15760 				return rv;
   15761 		} else {
   15762 			/* For 10Mbps */
   15763 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15764 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15765 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15766 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15767 		}
   15768 	}
   15769 
   15770 	return 0;
   15771 }
   15772 
   15773 /*
   15774  *  wm_link_stall_workaround_hv - Si workaround
   15775  *  @sc: pointer to the HW structure
   15776  *
   15777  *  This function works around a Si bug where the link partner can get
   15778  *  a link up indication before the PHY does. If small packets are sent
   15779  *  by the link partner they can be placed in the packet buffer without
   15780  *  being properly accounted for by the PHY and will stall preventing
   15781  *  further packets from being received.  The workaround is to clear the
   15782  *  packet buffer after the PHY detects link up.
   15783  */
   15784 static int
   15785 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15786 {
   15787 	uint16_t phyreg;
   15788 
   15789 	if (sc->sc_phytype != WMPHY_82578)
   15790 		return 0;
   15791 
    15792 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15793 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15794 	if ((phyreg & BMCR_LOOP) != 0)
   15795 		return 0;
   15796 
   15797 	/* Check if link is up and at 1Gbps */
   15798 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15799 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15800 	    | BM_CS_STATUS_SPEED_MASK;
   15801 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15802 		| BM_CS_STATUS_SPEED_1000))
   15803 		return 0;
   15804 
   15805 	delay(200 * 1000);	/* XXX too big */
   15806 
   15807 	/* Flush the packets in the fifo buffer */
   15808 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15809 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15810 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15811 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15812 
   15813 	return 0;
   15814 }
   15815 
   15816 static int
   15817 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15818 {
   15819 	int rv;
   15820 	uint16_t reg;
   15821 
   15822 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15823 	if (rv != 0)
   15824 		return rv;
   15825 
   15826 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15827 	    reg | HV_KMRN_MDIO_SLOW);
   15828 }
   15829 
   15830 /*
   15831  *  wm_configure_k1_ich8lan - Configure K1 power state
   15832  *  @sc: pointer to the HW structure
   15833  *  @enable: K1 state to configure
   15834  *
   15835  *  Configure the K1 power state based on the provided parameter.
   15836  *  Assumes semaphore already acquired.
   15837  */
   15838 static void
   15839 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15840 {
   15841 	uint32_t ctrl, ctrl_ext, tmp;
   15842 	uint16_t kmreg;
   15843 	int rv;
   15844 
   15845 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15846 
   15847 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15848 	if (rv != 0)
   15849 		return;
   15850 
   15851 	if (k1_enable)
   15852 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15853 	else
   15854 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15855 
   15856 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15857 	if (rv != 0)
   15858 		return;
   15859 
   15860 	delay(20);
   15861 
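          	/*
          	 * Briefly force the MAC speed (CTRL_FRCSPD plus the
          	 * speed-bypass bit) and then restore the original CTRL and
          	 * CTRL_EXT values; Intel's reference code does the same,
          	 * presumably to make the new K1 setting take effect.
          	 */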
   15862 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15863 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15864 
   15865 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15866 	tmp |= CTRL_FRCSPD;
   15867 
   15868 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15869 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15870 	CSR_WRITE_FLUSH(sc);
   15871 	delay(20);
   15872 
   15873 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15874 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15875 	CSR_WRITE_FLUSH(sc);
   15876 	delay(20);
   15877 
   15878 	return;
   15879 }
   15880 
    15881 /* Special case for the 82575: manual init is needed ... */
   15882 static void
   15883 wm_reset_init_script_82575(struct wm_softc *sc)
   15884 {
   15885 	/*
    15886 	 * Remark: this is untested code - we have no board without EEPROM.
    15887 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   15888 	 */
   15889 
   15890 	/* SerDes configuration via SERDESCTRL */
   15891 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15892 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15893 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15894 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15895 
   15896 	/* CCM configuration via CCMCTL register */
   15897 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15898 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15899 
   15900 	/* PCIe lanes configuration */
   15901 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15902 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15903 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15904 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15905 
   15906 	/* PCIe PLL Configuration */
   15907 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15908 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15909 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15910 }
   15911 
   15912 static void
   15913 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15914 {
   15915 	uint32_t reg;
   15916 	uint16_t nvmword;
   15917 	int rv;
   15918 
   15919 	if (sc->sc_type != WM_T_82580)
   15920 		return;
   15921 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15922 		return;
   15923 
   15924 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15925 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15926 	if (rv != 0) {
   15927 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15928 		    __func__);
   15929 		return;
   15930 	}
   15931 
   15932 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15933 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15934 		reg |= MDICNFG_DEST;
   15935 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15936 		reg |= MDICNFG_COM_MDIO;
   15937 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15938 }
   15939 
   15940 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15941 
   15942 static bool
   15943 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15944 {
   15945 	uint32_t reg;
   15946 	uint16_t id1, id2;
   15947 	int i, rv;
   15948 
   15949 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15950 		device_xname(sc->sc_dev), __func__));
   15951 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15952 
   15953 	id1 = id2 = 0xffff;
   15954 	for (i = 0; i < 2; i++) {
   15955 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15956 		    &id1);
   15957 		if ((rv != 0) || MII_INVALIDID(id1))
   15958 			continue;
   15959 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15960 		    &id2);
   15961 		if ((rv != 0) || MII_INVALIDID(id2))
   15962 			continue;
   15963 		break;
   15964 	}
   15965 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15966 		goto out;
   15967 
   15968 	/*
   15969 	 * In case the PHY needs to be in mdio slow mode,
   15970 	 * set slow mode and try to get the PHY id again.
   15971 	 */
   15972 	rv = 0;
   15973 	if (sc->sc_type < WM_T_PCH_LPT) {
   15974 		sc->phy.release(sc);
   15975 		wm_set_mdio_slow_mode_hv(sc);
   15976 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15977 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15978 		sc->phy.acquire(sc);
   15979 	}
   15980 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15981 		device_printf(sc->sc_dev, "XXX return with false\n");
   15982 		return false;
   15983 	}
   15984 out:
   15985 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15986 		/* Only unforce SMBus if ME is not active */
   15987 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15988 			uint16_t phyreg;
   15989 
   15990 			/* Unforce SMBus mode in PHY */
   15991 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15992 			    CV_SMB_CTRL, &phyreg);
   15993 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15994 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15995 			    CV_SMB_CTRL, phyreg);
   15996 
   15997 			/* Unforce SMBus mode in MAC */
   15998 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15999 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16000 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16001 		}
   16002 	}
   16003 	return true;
   16004 }
   16005 
   16006 static void
   16007 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16008 {
   16009 	uint32_t reg;
   16010 	int i;
   16011 
   16012 	/* Set PHY Config Counter to 50msec */
   16013 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16014 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16015 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16016 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16017 
   16018 	/* Toggle LANPHYPC */
   16019 	reg = CSR_READ(sc, WMREG_CTRL);
   16020 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16021 	reg &= ~CTRL_LANPHYPC_VALUE;
   16022 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16023 	CSR_WRITE_FLUSH(sc);
   16024 	delay(1000);
   16025 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16026 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16027 	CSR_WRITE_FLUSH(sc);
   16028 
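          	/*
          	 * Wait for the PHY to come back up: older parts use a fixed
          	 * 50ms delay, while PCH_LPT and newer poll the "LANPHYPC cycle
          	 * done" (LPCD) bit for up to roughly 100ms (20 * 5ms) and then
          	 * allow another 30ms of settle time.
          	 */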
   16029 	if (sc->sc_type < WM_T_PCH_LPT)
   16030 		delay(50 * 1000);
   16031 	else {
   16032 		i = 20;
   16033 
   16034 		do {
   16035 			delay(5 * 1000);
   16036 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16037 		    && i--);
   16038 
   16039 		delay(30 * 1000);
   16040 	}
   16041 }
   16042 
   16043 static int
   16044 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16045 {
   16046 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16047 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16048 	uint32_t rxa;
   16049 	uint16_t scale = 0, lat_enc = 0;
   16050 	int32_t obff_hwm = 0;
   16051 	int64_t lat_ns, value;
   16052 
   16053 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16054 		device_xname(sc->sc_dev), __func__));
   16055 
   16056 	if (link) {
   16057 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16058 		uint32_t status;
   16059 		uint16_t speed;
   16060 		pcireg_t preg;
   16061 
   16062 		status = CSR_READ(sc, WMREG_STATUS);
   16063 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16064 		case STATUS_SPEED_10:
   16065 			speed = 10;
   16066 			break;
   16067 		case STATUS_SPEED_100:
   16068 			speed = 100;
   16069 			break;
   16070 		case STATUS_SPEED_1000:
   16071 			speed = 1000;
   16072 			break;
   16073 		default:
   16074 			device_printf(sc->sc_dev, "Unknown speed "
   16075 			    "(status = %08x)\n", status);
   16076 			return -1;
   16077 		}
   16078 
   16079 		/* Rx Packet Buffer Allocation size (KB) */
   16080 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16081 
   16082 		/*
   16083 		 * Determine the maximum latency tolerated by the device.
   16084 		 *
   16085 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16086 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16087 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16088 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16089 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16090 		 */
   16091 		lat_ns = ((int64_t)rxa * 1024 -
   16092 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16093 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16094 		if (lat_ns < 0)
   16095 			lat_ns = 0;
   16096 		else
   16097 			lat_ns /= speed;
   16098 		value = lat_ns;
   16099 
   16100 		while (value > LTRV_VALUE) {
    16101 			scale++;
   16102 			value = howmany(value, __BIT(5));
   16103 		}
   16104 		if (scale > LTRV_SCALE_MAX) {
   16105 			device_printf(sc->sc_dev,
   16106 			    "Invalid LTR latency scale %d\n", scale);
   16107 			return -1;
   16108 		}
   16109 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
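          		/*
          		 * Worked example (illustrative only, assuming LTRV_VALUE
          		 * is the 10-bit value mask): with rxa = 24KB, a
          		 * 1500-byte MTU and a 1000Mbps link,
          		 *  lat_ns = (24 * 1024 - 2 * 1514) * 8 * 1000 / 1000
          		 *         = 172384.
          		 * The loop divides by 32 twice (scale = 2, value = 169),
          		 * so the encoded latency is 169 * 2^10 ns, about 173us.
          		 */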
   16110 
   16111 		/* Determine the maximum latency tolerated by the platform */
   16112 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16113 		    WM_PCI_LTR_CAP_LPT);
   16114 		max_snoop = preg & 0xffff;
   16115 		max_nosnoop = preg >> 16;
   16116 
   16117 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16118 
   16119 		if (lat_enc > max_ltr_enc) {
   16120 			lat_enc = max_ltr_enc;
   16121 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16122 			    * PCI_LTR_SCALETONS(
   16123 				    __SHIFTOUT(lat_enc,
   16124 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16125 		}
   16126 
   16127 		if (lat_ns) {
   16128 			lat_ns *= speed * 1000;
   16129 			lat_ns /= 8;
   16130 			lat_ns /= 1000000000;
   16131 			obff_hwm = (int32_t)(rxa - lat_ns);
   16132 		}
   16133 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    16134 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    16135 			    " (rxa = %d, lat_ns = %d)\n",
   16136 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16137 			return -1;
   16138 		}
   16139 	}
   16140 	/* Snoop and No-Snoop latencies the same */
   16141 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16142 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16143 
   16144 	/* Set OBFF high water mark */
   16145 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16146 	reg |= obff_hwm;
   16147 	CSR_WRITE(sc, WMREG_SVT, reg);
   16148 
   16149 	/* Enable OBFF */
   16150 	reg = CSR_READ(sc, WMREG_SVCR);
   16151 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16152 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16153 
   16154 	return 0;
   16155 }
   16156 
   16157 /*
   16158  * I210 Errata 25 and I211 Errata 10
   16159  * Slow System Clock.
   16160  */
   16161 static int
   16162 wm_pll_workaround_i210(struct wm_softc *sc)
   16163 {
   16164 	uint32_t mdicnfg, wuc;
   16165 	uint32_t reg;
   16166 	pcireg_t pcireg;
   16167 	uint32_t pmreg;
   16168 	uint16_t nvmword, tmp_nvmword;
   16169 	uint16_t phyval;
   16170 	bool wa_done = false;
   16171 	int i, rv = 0;
   16172 
   16173 	/* Get Power Management cap offset */
   16174 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16175 	    &pmreg, NULL) == 0)
   16176 		return -1;
   16177 
   16178 	/* Save WUC and MDICNFG registers */
   16179 	wuc = CSR_READ(sc, WMREG_WUC);
   16180 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16181 
   16182 	reg = mdicnfg & ~MDICNFG_DEST;
   16183 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16184 
   16185 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16186 		nvmword = INVM_DEFAULT_AL;
   16187 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16188 
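          	/*
          	 * Each pass below checks whether the PHY PLL is configured;
          	 * if not, it resets the internal PHY, patches the iNVM
          	 * autoload word via EEARBC, bounces the function through
          	 * D3hot (which appears to rerun the autoload) and restores
          	 * the original word afterwards.
          	 */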
   16189 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16190 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16191 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16192 
   16193 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16194 			rv = 0;
   16195 			break; /* OK */
   16196 		} else
   16197 			rv = -1;
   16198 
   16199 		wa_done = true;
   16200 		/* Directly reset the internal PHY */
   16201 		reg = CSR_READ(sc, WMREG_CTRL);
   16202 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16203 
   16204 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16205 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16206 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16207 
   16208 		CSR_WRITE(sc, WMREG_WUC, 0);
   16209 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16210 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16211 
   16212 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16213 		    pmreg + PCI_PMCSR);
   16214 		pcireg |= PCI_PMCSR_STATE_D3;
   16215 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16216 		    pmreg + PCI_PMCSR, pcireg);
   16217 		delay(1000);
   16218 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16219 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16220 		    pmreg + PCI_PMCSR, pcireg);
   16221 
   16222 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16223 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16224 
   16225 		/* Restore WUC register */
   16226 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16227 	}
   16228 
   16229 	/* Restore MDICNFG setting */
   16230 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16231 	if (wa_done)
   16232 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16233 	return rv;
   16234 }
   16235 
   16236 static void
   16237 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16238 {
   16239 	uint32_t reg;
   16240 
   16241 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16242 		device_xname(sc->sc_dev), __func__));
   16243 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16244 	    || (sc->sc_type == WM_T_PCH_CNP));
   16245 
   16246 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16247 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16248 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16249 
   16250 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16251 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16252 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16253 }
   16254