      1 /*	$NetBSD: if_wm.c,v 1.421 2016/10/20 04:06:53 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- Tx multi-queue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
     79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.421 2016/10/20 04:06:53 msaitoh Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
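
/*
 * A typical (illustrative) use of the debug macro above; the extra
 * parentheses around the printf arguments are required because
 * DPRINTF() pastes them directly after "printf":
 *
 *	DPRINTF(WM_DEBUG_LINK,
 *	    ("%s: LINK: link up\n", device_xname(sc->sc_dev)));
 */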
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
    167  * Maximum number of interrupts: one vector per queue plus one for link.
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    197 
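/*
 * Illustrative sketch (not driver code): WM_NTXDESC() and
 * WM_TXQUEUELEN() are powers of two, so the mask-based macros above
 * implement a cheap modulo for ring wrap-around.
 */
#if 0	/* example only */
static int
wm_example_next_tx(struct wm_txqueue *txq, int idx)
{
	/* With 4096 descriptors, WM_NEXTTX(txq, 4095) wraps to 0. */
	return WM_NEXTTX(txq, idx);
}
#endif
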
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for each
    204  * normal-sized packet.  Jumbo packets consume 5 Rx buffers for a
    205  * full-sized packet.  We allocate 256 receive descriptors, each with
    206  * a 2k buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    212 
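/*
 * Worked example for the sizing comment above (assuming ~9k jumbo
 * frames): howmany(9018, MCLBYTES) == 5 clusters per jumbo packet, so
 * 256 descriptors give 256 / 5 == 51, i.e. roughly 50 jumbo packets.
 * Note also that WM_PREVRX(0) == (-1 & 255) == 255, so the index walks
 * backwards correctly across the ring wrap.
 */
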
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    220 
    221 /*
    222  * Software state for transmit jobs.
    223  */
    224 struct wm_txsoft {
    225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    227 	int txs_firstdesc;		/* first descriptor in packet */
    228 	int txs_lastdesc;		/* last descriptor in packet */
    229 	int txs_ndesc;			/* # of descriptors used */
    230 };
    231 
    232 /*
    233  * Software state for receive buffers.  Each descriptor gets a
    234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    235  * more than one buffer, we chain them together.
    236  */
    237 struct wm_rxsoft {
    238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    240 };
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
    244 static uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
    250 
    251 static const uint32_t wm_82580_rxpbs_table[] = {
    252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    253 };
    254 
    255 struct wm_softc;
    256 
    257 #ifdef WM_EVENT_COUNTERS
    258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    260 	struct evcnt qname##_ev_##evname;
    261 
    262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    263 	do {								\
    264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    266 		    "%s%02d%s", #qname, (qnum), #evname);		\
    267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    268 		    (evtype), NULL, (xname),				\
    269 		    (q)->qname##_##evname##_evcnt_name);		\
    270 	} while (0)
    271 
    272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    274 
    275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    277 #endif /* WM_EVENT_COUNTERS */
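
/*
 * A sketch of how the event-counter macros above expand.  For a queue
 * structure containing WM_Q_EVCNT_DEFINE(txq, txdw),
 *
 *	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, xname);
 *
 * roughly becomes
 *
 *	snprintf(txq->txq_txdw_evcnt_name,
 *	    sizeof(txq->txq_txdw_evcnt_name), "%s%02d%s", "txq", 0, "txdw");
 *	evcnt_attach_dynamic(&txq->txq_ev_txdw, EVCNT_TYPE_INTR, NULL,
 *	    xname, txq->txq_txdw_evcnt_name);
 *
 * yielding a counter named "txq00txdw" under the device's xname.
 */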
    278 
    279 struct wm_txqueue {
    280 	kmutex_t *txq_lock;		/* lock for tx operations */
    281 
    282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    283 
    284 	/* Software state for the transmit descriptors. */
    285 	int txq_num;			/* must be a power of two */
    286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    287 
    288 	/* TX control data structures. */
    289 	int txq_ndesc;			/* must be a power of two */
    290 	size_t txq_descsize;		/* size of a Tx descriptor */
    291 	txdescs_t *txq_descs_u;
    292 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    294 	int txq_desc_rseg;		/* real number of control segments */
    295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    296 #define	txq_descs	txq_descs_u->sctxu_txdescs
    297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    298 
    299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    300 
    301 	int txq_free;			/* number of free Tx descriptors */
    302 	int txq_next;			/* next ready Tx descriptor */
    303 
    304 	int txq_sfree;			/* number of free Tx jobs */
    305 	int txq_snext;			/* next free Tx job */
    306 	int txq_sdirty;			/* dirty Tx jobs */
    307 
    308 	/* These 4 variables are used only on the 82547. */
    309 	int txq_fifo_size;		/* Tx FIFO size */
    310 	int txq_fifo_head;		/* current head of FIFO */
    311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    313 
    314 	/*
    315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    316 	 * CPUs.  This queue mediates between them without blocking.
    317 	 */
    318 	pcq_t *txq_interq;
    319 
    320 	/*
    321 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    322 	 * to manage the Tx H/W queue's busy flag.
    323 	 */
    324 	int txq_flags;			/* flags for H/W queue, see below */
    325 #define	WM_TXQ_NO_SPACE	0x1
    326 
    327 #ifdef WM_EVENT_COUNTERS
    328 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    329 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    330 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    331 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    332 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    333 						/* XXX not used? */
    334 
    335 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    336 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    337 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    338 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    339 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    340 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    341 
    342 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
    343 
    344 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    345 
    346 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    347 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    348 #endif /* WM_EVENT_COUNTERS */
    349 };
    350 
    351 struct wm_rxqueue {
    352 	kmutex_t *rxq_lock;		/* lock for rx operations */
    353 
    354 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    355 
    356 	/* Software state for the receive descriptors. */
    357 	wiseman_rxdesc_t *rxq_descs;
    358 
    359 	/* RX control data structures. */
    360 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    361 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    362 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    363 	int rxq_desc_rseg;		/* real number of control segments */
    364 	size_t rxq_desc_size;		/* control data size */
    365 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    366 
    367 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    368 
    369 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    370 	int rxq_discard;
    371 	int rxq_len;
    372 	struct mbuf *rxq_head;
    373 	struct mbuf *rxq_tail;
    374 	struct mbuf **rxq_tailp;
    375 
    376 #ifdef WM_EVENT_COUNTERS
    377 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    378 
    379 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    380 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    381 #endif
    382 };
    383 
    384 struct wm_queue {
    385 	int wmq_id;			/* index of transmit and receive queues */
    386 	int wmq_intr_idx;		/* index of MSI-X tables */
    387 
    388 	struct wm_txqueue wmq_txq;
    389 	struct wm_rxqueue wmq_rxq;
    390 };
    391 
    392 /*
    393  * Software state per device.
    394  */
    395 struct wm_softc {
    396 	device_t sc_dev;		/* generic device information */
    397 	bus_space_tag_t sc_st;		/* bus space tag */
    398 	bus_space_handle_t sc_sh;	/* bus space handle */
    399 	bus_size_t sc_ss;		/* bus space size */
    400 	bus_space_tag_t sc_iot;		/* I/O space tag */
    401 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    402 	bus_size_t sc_ios;		/* I/O space size */
    403 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    404 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    405 	bus_size_t sc_flashs;		/* flash registers space size */
    406 	off_t sc_flashreg_offset;	/*
    407 					 * offset to flash registers from
    408 					 * start of BAR
    409 					 */
    410 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    411 
    412 	struct ethercom sc_ethercom;	/* ethernet common data */
    413 	struct mii_data sc_mii;		/* MII/media information */
    414 
    415 	pci_chipset_tag_t sc_pc;
    416 	pcitag_t sc_pcitag;
    417 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    418 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    419 
    420 	uint16_t sc_pcidevid;		/* PCI device ID */
    421 	wm_chip_type sc_type;		/* MAC type */
    422 	int sc_rev;			/* MAC revision */
    423 	wm_phy_type sc_phytype;		/* PHY type */
    424 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    425 #define	WM_MEDIATYPE_UNKNOWN		0x00
    426 #define	WM_MEDIATYPE_FIBER		0x01
    427 #define	WM_MEDIATYPE_COPPER		0x02
    428 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    429 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    430 	int sc_flags;			/* flags; see below */
    431 	int sc_if_flags;		/* last if_flags */
    432 	int sc_flowflags;		/* 802.3x flow control flags */
    433 	int sc_align_tweak;
    434 
    435 	void *sc_ihs[WM_MAX_NINTR];	/*
    436 					 * Interrupt cookies;
    437 					 * legacy and MSI use sc_ihs[0].
    438 					 */
    439 	pci_intr_handle_t *sc_intrs;	/* legacy and MSI use sc_intrs[0] */
    440 	int sc_nintrs;			/* number of interrupts */
    441 
    442 	int sc_link_intr_idx;		/* index of MSI-X tables */
    443 
    444 	callout_t sc_tick_ch;		/* tick callout */
    445 	bool sc_stopping;
    446 
    447 	int sc_nvm_ver_major;
    448 	int sc_nvm_ver_minor;
    449 	int sc_nvm_ver_build;
    450 	int sc_nvm_addrbits;		/* NVM address bits */
    451 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    452 	int sc_ich8_flash_base;
    453 	int sc_ich8_flash_bank_size;
    454 	int sc_nvm_k1_enabled;
    455 
    456 	int sc_nqueues;
    457 	struct wm_queue *sc_queue;
    458 
    459 	int sc_affinity_offset;
    460 
    461 #ifdef WM_EVENT_COUNTERS
    462 	/* Event counters. */
    463 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    464 
    465 	/* WM_T_82542_2_1 only */
    466 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    467 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    468 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    469 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    470 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    471 #endif /* WM_EVENT_COUNTERS */
    472 
    473 	/* This variable is used only on the 82547. */
    474 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    475 
    476 	uint32_t sc_ctrl;		/* prototype CTRL register */
    477 #if 0
    478 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    479 #endif
    480 	uint32_t sc_icr;		/* prototype interrupt bits */
    481 	uint32_t sc_itr;		/* prototype intr throttling reg */
    482 	uint32_t sc_tctl;		/* prototype TCTL register */
    483 	uint32_t sc_rctl;		/* prototype RCTL register */
    484 	uint32_t sc_txcw;		/* prototype TXCW register */
    485 	uint32_t sc_tipg;		/* prototype TIPG register */
    486 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    487 	uint32_t sc_pba;		/* prototype PBA register */
    488 
    489 	int sc_tbi_linkup;		/* TBI link status */
    490 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    491 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    492 
    493 	int sc_mchash_type;		/* multicast filter offset */
    494 
    495 	krndsource_t rnd_source;	/* random source */
    496 
    497 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    498 
    499 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    500 };
    501 
    502 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    503 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    504 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    505 
    506 #ifdef WM_MPSAFE
    507 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    508 #else
    509 #define CALLOUT_FLAGS	0
    510 #endif
    511 
    512 #define	WM_RXCHAIN_RESET(rxq)						\
    513 do {									\
    514 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    515 	*(rxq)->rxq_tailp = NULL;					\
    516 	(rxq)->rxq_len = 0;						\
    517 } while (/*CONSTCOND*/0)
    518 
    519 #define	WM_RXCHAIN_LINK(rxq, m)						\
    520 do {									\
    521 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    522 	(rxq)->rxq_tailp = &(m)->m_next;				\
    523 } while (/*CONSTCOND*/0)
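
/*
 * A minimal sketch (example only) of the tail-pointer idiom above:
 * rxq_tailp always points at the slot that should receive the next
 * mbuf, so appends are O(1) and no NULL check of rxq_head is needed.
 */
#if 0	/* example only */
static void
wm_example_rxchain(struct wm_rxqueue *rxq, struct mbuf *m1, struct mbuf *m2)
{
	WM_RXCHAIN_RESET(rxq);		/* rxq_head == NULL */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head == m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2, rxq_tail == m2 */
}
#endif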
    524 
    525 #ifdef WM_EVENT_COUNTERS
    526 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    527 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    528 
    529 #define WM_Q_EVCNT_INCR(qname, evname)			\
    530 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    531 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    532 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    533 #else /* !WM_EVENT_COUNTERS */
    534 #define	WM_EVCNT_INCR(ev)	/* nothing */
    535 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    536 
    537 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    538 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    539 #endif /* !WM_EVENT_COUNTERS */
    540 
    541 #define	CSR_READ(sc, reg)						\
    542 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    543 #define	CSR_WRITE(sc, reg, val)						\
    544 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    545 #define	CSR_WRITE_FLUSH(sc)						\
    546 	(void) CSR_READ((sc), WMREG_STATUS)
    547 
    548 #define ICH8_FLASH_READ32(sc, reg)					\
    549 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    550 	    (reg) + sc->sc_flashreg_offset)
    551 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    552 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    553 	    (reg) + sc->sc_flashreg_offset, (data))
    554 
    555 #define ICH8_FLASH_READ16(sc, reg)					\
    556 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    557 	    (reg) + sc->sc_flashreg_offset)
    558 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    559 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    560 	    (reg) + sc->sc_flashreg_offset, (data))
    561 
    562 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    563 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    564 
    565 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    566 #define	WM_CDTXADDR_HI(txq, x)						\
    567 	(sizeof(bus_addr_t) == 8 ?					\
    568 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    569 
    570 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    571 #define	WM_CDRXADDR_HI(rxq, x)						\
    572 	(sizeof(bus_addr_t) == 8 ?					\
    573 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
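
/*
 * Worked example for the address-split macros above: descriptor base
 * addresses are programmed into the chip as two 32-bit halves.  With a
 * 64-bit bus_addr_t of 0x123456789, the low half is 0x23456789 and the
 * high half is 0x1; with a 32-bit bus_addr_t the high half is always 0.
 */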
    574 
    575 /*
    576  * Register read/write functions.
    577  * Other than CSR_{READ|WRITE}().
    578  */
    579 #if 0
    580 static inline uint32_t wm_io_read(struct wm_softc *, int);
    581 #endif
    582 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    583 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    584 	uint32_t, uint32_t);
    585 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    586 
    587 /*
    588  * Descriptor sync/init functions.
    589  */
    590 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    591 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    592 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    593 
    594 /*
    595  * Device driver interface functions and commonly used functions.
    596  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    597  */
    598 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    599 static int	wm_match(device_t, cfdata_t, void *);
    600 static void	wm_attach(device_t, device_t, void *);
    601 static int	wm_detach(device_t, int);
    602 static bool	wm_suspend(device_t, const pmf_qual_t *);
    603 static bool	wm_resume(device_t, const pmf_qual_t *);
    604 static void	wm_watchdog(struct ifnet *);
    605 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    606 static void	wm_tick(void *);
    607 static int	wm_ifflags_cb(struct ethercom *);
    608 static int	wm_ioctl(struct ifnet *, u_long, void *);
    609 /* MAC address related */
    610 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    611 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    612 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    613 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    614 static void	wm_set_filter(struct wm_softc *);
    615 /* Reset and init related */
    616 static void	wm_set_vlan(struct wm_softc *);
    617 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    618 static void	wm_get_auto_rd_done(struct wm_softc *);
    619 static void	wm_lan_init_done(struct wm_softc *);
    620 static void	wm_get_cfg_done(struct wm_softc *);
    621 static void	wm_initialize_hardware_bits(struct wm_softc *);
    622 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    623 static void	wm_reset(struct wm_softc *);
    624 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    625 static void	wm_rxdrain(struct wm_rxqueue *);
    626 static void	wm_rss_getkey(uint8_t *);
    627 static void	wm_init_rss(struct wm_softc *);
    628 static void	wm_adjust_qnum(struct wm_softc *, int);
    629 static int	wm_setup_legacy(struct wm_softc *);
    630 static int	wm_setup_msix(struct wm_softc *);
    631 static int	wm_init(struct ifnet *);
    632 static int	wm_init_locked(struct ifnet *);
    633 static void	wm_stop(struct ifnet *, int);
    634 static void	wm_stop_locked(struct ifnet *, int);
    635 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    636 static void	wm_82547_txfifo_stall(void *);
    637 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    638 /* DMA related */
    639 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    640 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    641 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    642 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    643     struct wm_txqueue *);
    644 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    645 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    646 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    647     struct wm_rxqueue *);
    648 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    649 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    650 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    651 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    652 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    653 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    654 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    655     struct wm_txqueue *);
    656 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    657     struct wm_rxqueue *);
    658 static int	wm_alloc_txrx_queues(struct wm_softc *);
    659 static void	wm_free_txrx_queues(struct wm_softc *);
    660 static int	wm_init_txrx_queues(struct wm_softc *);
    661 /* Start */
    662 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    663     uint32_t *, uint8_t *);
    664 static void	wm_start(struct ifnet *);
    665 static void	wm_start_locked(struct ifnet *);
    666 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    667     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    668 static void	wm_nq_start(struct ifnet *);
    669 static void	wm_nq_start_locked(struct ifnet *);
    670 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    671 static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
    672 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    673 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    674 /* Interrupt */
    675 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    676 static void	wm_rxeof(struct wm_rxqueue *);
    677 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    678 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    679 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    680 static void	wm_linkintr(struct wm_softc *, uint32_t);
    681 static int	wm_intr_legacy(void *);
    682 static int	wm_txrxintr_msix(void *);
    683 static int	wm_linkintr_msix(void *);
    684 
    685 /*
    686  * Media related.
    687  * GMII, SGMII, TBI, SERDES and SFP.
    688  */
    689 /* Common */
    690 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    691 /* GMII related */
    692 static void	wm_gmii_reset(struct wm_softc *);
    693 static int	wm_get_phy_id_82575(struct wm_softc *);
    694 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    695 static int	wm_gmii_mediachange(struct ifnet *);
    696 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    697 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    698 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    699 static int	wm_gmii_i82543_readreg(device_t, int, int);
    700 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    701 static int	wm_gmii_i82544_readreg(device_t, int, int);
    702 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    703 static int	wm_gmii_i80003_readreg(device_t, int, int);
    704 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    705 static int	wm_gmii_bm_readreg(device_t, int, int);
    706 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    707 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    708 static int	wm_gmii_hv_readreg(device_t, int, int);
    709 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    710 static int	wm_gmii_82580_readreg(device_t, int, int);
    711 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    712 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    713 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    714 static void	wm_gmii_statchg(struct ifnet *);
    715 static int	wm_kmrn_readreg(struct wm_softc *, int);
    716 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    717 /* SGMII */
    718 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    719 static int	wm_sgmii_readreg(device_t, int, int);
    720 static void	wm_sgmii_writereg(device_t, int, int, int);
    721 /* TBI related */
    722 static void	wm_tbi_mediainit(struct wm_softc *);
    723 static int	wm_tbi_mediachange(struct ifnet *);
    724 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    725 static int	wm_check_for_link(struct wm_softc *);
    726 static void	wm_tbi_tick(struct wm_softc *);
    727 /* SERDES related */
    728 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    729 static int	wm_serdes_mediachange(struct ifnet *);
    730 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    731 static void	wm_serdes_tick(struct wm_softc *);
    732 /* SFP related */
    733 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    734 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    735 
    736 /*
    737  * NVM related.
    738  * Microwire, SPI (w/wo EERD) and Flash.
    739  */
    740 /* Misc functions */
    741 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    742 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    743 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    744 /* Microwire */
    745 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    746 /* SPI */
    747 static int	wm_nvm_ready_spi(struct wm_softc *);
    748 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    749 /* Using with EERD */
    750 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    751 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    752 /* Flash */
    753 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    754     unsigned int *);
    755 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    756 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    757 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    758 	uint32_t *);
    759 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    760 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    761 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    762 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    763 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    764 /* iNVM */
    765 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    766 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    767 /* Lock, detecting NVM type, validate checksum and read */
    768 static int	wm_nvm_acquire(struct wm_softc *);
    769 static void	wm_nvm_release(struct wm_softc *);
    770 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    771 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    772 static int	wm_nvm_validate_checksum(struct wm_softc *);
    773 static void	wm_nvm_version_invm(struct wm_softc *);
    774 static void	wm_nvm_version(struct wm_softc *);
    775 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    776 
    777 /*
    778  * Hardware semaphores.
    779  * Very complex...
    780  */
    781 static int	wm_get_swsm_semaphore(struct wm_softc *);
    782 static void	wm_put_swsm_semaphore(struct wm_softc *);
    783 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    784 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    785 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    786 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    787 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    788 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    789 
    790 /*
    791  * Management mode and power management related subroutines.
    792  * BMC, AMT, suspend/resume and EEE.
    793  */
    794 #ifdef WM_WOL
    795 static int	wm_check_mng_mode(struct wm_softc *);
    796 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    797 static int	wm_check_mng_mode_82574(struct wm_softc *);
    798 static int	wm_check_mng_mode_generic(struct wm_softc *);
    799 #endif
    800 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    801 static bool	wm_phy_resetisblocked(struct wm_softc *);
    802 static void	wm_get_hw_control(struct wm_softc *);
    803 static void	wm_release_hw_control(struct wm_softc *);
    804 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    805 static void	wm_smbustopci(struct wm_softc *);
    806 static void	wm_init_manageability(struct wm_softc *);
    807 static void	wm_release_manageability(struct wm_softc *);
    808 static void	wm_get_wakeup(struct wm_softc *);
    809 #ifdef WM_WOL
    810 static void	wm_enable_phy_wakeup(struct wm_softc *);
    811 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    812 static void	wm_enable_wakeup(struct wm_softc *);
    813 #endif
    814 /* LPLU (Low Power Link Up) */
    815 static void	wm_lplu_d0_disable(struct wm_softc *);
    816 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    817 /* EEE */
    818 static void	wm_set_eee_i350(struct wm_softc *);
    819 
    820 /*
    821  * Workarounds (mainly PHY related).
    822  * Basically, the PHY workarounds live in the PHY drivers.
    823  */
    824 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    825 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    826 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    827 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    828 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    829 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    830 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    831 static void	wm_reset_init_script_82575(struct wm_softc *);
    832 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    833 static void	wm_pll_workaround_i210(struct wm_softc *);
    834 
    835 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    836     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    837 
    838 /*
    839  * Devices supported by this driver.
    840  */
    841 static const struct wm_product {
    842 	pci_vendor_id_t		wmp_vendor;
    843 	pci_product_id_t	wmp_product;
    844 	const char		*wmp_name;
    845 	wm_chip_type		wmp_type;
    846 	uint32_t		wmp_flags;
    847 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    848 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    849 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    850 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    851 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    852 } wm_products[] = {
    853 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    854 	  "Intel i82542 1000BASE-X Ethernet",
    855 	  WM_T_82542_2_1,	WMP_F_FIBER },
    856 
    857 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    858 	  "Intel i82543GC 1000BASE-X Ethernet",
    859 	  WM_T_82543,		WMP_F_FIBER },
    860 
    861 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    862 	  "Intel i82543GC 1000BASE-T Ethernet",
    863 	  WM_T_82543,		WMP_F_COPPER },
    864 
    865 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    866 	  "Intel i82544EI 1000BASE-T Ethernet",
    867 	  WM_T_82544,		WMP_F_COPPER },
    868 
    869 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    870 	  "Intel i82544EI 1000BASE-X Ethernet",
    871 	  WM_T_82544,		WMP_F_FIBER },
    872 
    873 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    874 	  "Intel i82544GC 1000BASE-T Ethernet",
    875 	  WM_T_82544,		WMP_F_COPPER },
    876 
    877 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    878 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    879 	  WM_T_82544,		WMP_F_COPPER },
    880 
    881 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    882 	  "Intel i82540EM 1000BASE-T Ethernet",
    883 	  WM_T_82540,		WMP_F_COPPER },
    884 
    885 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    886 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    887 	  WM_T_82540,		WMP_F_COPPER },
    888 
    889 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    890 	  "Intel i82540EP 1000BASE-T Ethernet",
    891 	  WM_T_82540,		WMP_F_COPPER },
    892 
    893 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    894 	  "Intel i82540EP 1000BASE-T Ethernet",
    895 	  WM_T_82540,		WMP_F_COPPER },
    896 
    897 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    898 	  "Intel i82540EP 1000BASE-T Ethernet",
    899 	  WM_T_82540,		WMP_F_COPPER },
    900 
    901 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    902 	  "Intel i82545EM 1000BASE-T Ethernet",
    903 	  WM_T_82545,		WMP_F_COPPER },
    904 
    905 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    906 	  "Intel i82545GM 1000BASE-T Ethernet",
    907 	  WM_T_82545_3,		WMP_F_COPPER },
    908 
    909 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    910 	  "Intel i82545GM 1000BASE-X Ethernet",
    911 	  WM_T_82545_3,		WMP_F_FIBER },
    912 
    913 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    914 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    915 	  WM_T_82545_3,		WMP_F_SERDES },
    916 
    917 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    918 	  "Intel i82546EB 1000BASE-T Ethernet",
    919 	  WM_T_82546,		WMP_F_COPPER },
    920 
    921 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    922 	  "Intel i82546EB 1000BASE-T Ethernet",
    923 	  WM_T_82546,		WMP_F_COPPER },
    924 
    925 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    926 	  "Intel i82545EM 1000BASE-X Ethernet",
    927 	  WM_T_82545,		WMP_F_FIBER },
    928 
    929 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    930 	  "Intel i82546EB 1000BASE-X Ethernet",
    931 	  WM_T_82546,		WMP_F_FIBER },
    932 
    933 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    934 	  "Intel i82546GB 1000BASE-T Ethernet",
    935 	  WM_T_82546_3,		WMP_F_COPPER },
    936 
    937 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    938 	  "Intel i82546GB 1000BASE-X Ethernet",
    939 	  WM_T_82546_3,		WMP_F_FIBER },
    940 
    941 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    942 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    943 	  WM_T_82546_3,		WMP_F_SERDES },
    944 
    945 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    946 	  "i82546GB quad-port Gigabit Ethernet",
    947 	  WM_T_82546_3,		WMP_F_COPPER },
    948 
    949 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    950 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    951 	  WM_T_82546_3,		WMP_F_COPPER },
    952 
    953 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    954 	  "Intel PRO/1000MT (82546GB)",
    955 	  WM_T_82546_3,		WMP_F_COPPER },
    956 
    957 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    958 	  "Intel i82541EI 1000BASE-T Ethernet",
    959 	  WM_T_82541,		WMP_F_COPPER },
    960 
    961 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    962 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    963 	  WM_T_82541,		WMP_F_COPPER },
    964 
    965 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    966 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    967 	  WM_T_82541,		WMP_F_COPPER },
    968 
    969 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    970 	  "Intel i82541ER 1000BASE-T Ethernet",
    971 	  WM_T_82541_2,		WMP_F_COPPER },
    972 
    973 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    974 	  "Intel i82541GI 1000BASE-T Ethernet",
    975 	  WM_T_82541_2,		WMP_F_COPPER },
    976 
    977 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    978 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    979 	  WM_T_82541_2,		WMP_F_COPPER },
    980 
    981 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    982 	  "Intel i82541PI 1000BASE-T Ethernet",
    983 	  WM_T_82541_2,		WMP_F_COPPER },
    984 
    985 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    986 	  "Intel i82547EI 1000BASE-T Ethernet",
    987 	  WM_T_82547,		WMP_F_COPPER },
    988 
    989 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    990 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    991 	  WM_T_82547,		WMP_F_COPPER },
    992 
    993 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    994 	  "Intel i82547GI 1000BASE-T Ethernet",
    995 	  WM_T_82547_2,		WMP_F_COPPER },
    996 
    997 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    998 	  "Intel PRO/1000 PT (82571EB)",
    999 	  WM_T_82571,		WMP_F_COPPER },
   1000 
   1001 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1002 	  "Intel PRO/1000 PF (82571EB)",
   1003 	  WM_T_82571,		WMP_F_FIBER },
   1004 
   1005 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1006 	  "Intel PRO/1000 PB (82571EB)",
   1007 	  WM_T_82571,		WMP_F_SERDES },
   1008 
   1009 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1010 	  "Intel PRO/1000 QT (82571EB)",
   1011 	  WM_T_82571,		WMP_F_COPPER },
   1012 
   1013 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1014 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1015 	  WM_T_82571,		WMP_F_COPPER, },
   1016 
   1017 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1018 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1019 	  WM_T_82571,		WMP_F_COPPER, },
   1020 
   1021 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1022 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1023 	  WM_T_82571,		WMP_F_SERDES, },
   1024 
   1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1026 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1027 	  WM_T_82571,		WMP_F_SERDES, },
   1028 
   1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1030 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1031 	  WM_T_82571,		WMP_F_FIBER, },
   1032 
   1033 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1034 	  "Intel i82572EI 1000baseT Ethernet",
   1035 	  WM_T_82572,		WMP_F_COPPER },
   1036 
   1037 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1038 	  "Intel i82572EI 1000baseX Ethernet",
   1039 	  WM_T_82572,		WMP_F_FIBER },
   1040 
   1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1042 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1043 	  WM_T_82572,		WMP_F_SERDES },
   1044 
   1045 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1046 	  "Intel i82572EI 1000baseT Ethernet",
   1047 	  WM_T_82572,		WMP_F_COPPER },
   1048 
   1049 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1050 	  "Intel i82573E",
   1051 	  WM_T_82573,		WMP_F_COPPER },
   1052 
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1054 	  "Intel i82573E IAMT",
   1055 	  WM_T_82573,		WMP_F_COPPER },
   1056 
   1057 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1058 	  "Intel i82573L Gigabit Ethernet",
   1059 	  WM_T_82573,		WMP_F_COPPER },
   1060 
   1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1062 	  "Intel i82574L",
   1063 	  WM_T_82574,		WMP_F_COPPER },
   1064 
   1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1066 	  "Intel i82574L",
   1067 	  WM_T_82574,		WMP_F_COPPER },
   1068 
   1069 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1070 	  "Intel i82583V",
   1071 	  WM_T_82583,		WMP_F_COPPER },
   1072 
   1073 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1074 	  "i80003 dual 1000baseT Ethernet",
   1075 	  WM_T_80003,		WMP_F_COPPER },
   1076 
   1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1078 	  "i80003 dual 1000baseX Ethernet",
   1079 	  WM_T_80003,		WMP_F_COPPER },
   1080 
   1081 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1082 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1083 	  WM_T_80003,		WMP_F_SERDES },
   1084 
   1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1086 	  "Intel i80003 1000baseT Ethernet",
   1087 	  WM_T_80003,		WMP_F_COPPER },
   1088 
   1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1090 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1091 	  WM_T_80003,		WMP_F_SERDES },
   1092 
   1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1094 	  "Intel i82801H (M_AMT) LAN Controller",
   1095 	  WM_T_ICH8,		WMP_F_COPPER },
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1097 	  "Intel i82801H (AMT) LAN Controller",
   1098 	  WM_T_ICH8,		WMP_F_COPPER },
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1100 	  "Intel i82801H LAN Controller",
   1101 	  WM_T_ICH8,		WMP_F_COPPER },
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1103 	  "Intel i82801H (IFE) LAN Controller",
   1104 	  WM_T_ICH8,		WMP_F_COPPER },
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1106 	  "Intel i82801H (M) LAN Controller",
   1107 	  WM_T_ICH8,		WMP_F_COPPER },
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1109 	  "Intel i82801H IFE (GT) LAN Controller",
   1110 	  WM_T_ICH8,		WMP_F_COPPER },
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1112 	  "Intel i82801H IFE (G) LAN Controller",
   1113 	  WM_T_ICH8,		WMP_F_COPPER },
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1115 	  "82801I (AMT) LAN Controller",
   1116 	  WM_T_ICH9,		WMP_F_COPPER },
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1118 	  "82801I LAN Controller",
   1119 	  WM_T_ICH9,		WMP_F_COPPER },
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1121 	  "82801I (G) LAN Controller",
   1122 	  WM_T_ICH9,		WMP_F_COPPER },
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1124 	  "82801I (GT) LAN Controller",
   1125 	  WM_T_ICH9,		WMP_F_COPPER },
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1127 	  "82801I (C) LAN Controller",
   1128 	  WM_T_ICH9,		WMP_F_COPPER },
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1130 	  "82801I mobile LAN Controller",
   1131 	  WM_T_ICH9,		WMP_F_COPPER },
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1133 	  "82801I mobile (V) LAN Controller",
   1134 	  WM_T_ICH9,		WMP_F_COPPER },
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1136 	  "82801I mobile (AMT) LAN Controller",
   1137 	  WM_T_ICH9,		WMP_F_COPPER },
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1139 	  "82567LM-4 LAN Controller",
   1140 	  WM_T_ICH9,		WMP_F_COPPER },
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1142 	  "82567V-3 LAN Controller",
   1143 	  WM_T_ICH9,		WMP_F_COPPER },
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1145 	  "82567LM-2 LAN Controller",
   1146 	  WM_T_ICH10,		WMP_F_COPPER },
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1148 	  "82567LF-2 LAN Controller",
   1149 	  WM_T_ICH10,		WMP_F_COPPER },
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1151 	  "82567LM-3 LAN Controller",
   1152 	  WM_T_ICH10,		WMP_F_COPPER },
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1154 	  "82567LF-3 LAN Controller",
   1155 	  WM_T_ICH10,		WMP_F_COPPER },
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1157 	  "82567V-2 LAN Controller",
   1158 	  WM_T_ICH10,		WMP_F_COPPER },
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1160 	  "82567V-3? LAN Controller",
   1161 	  WM_T_ICH10,		WMP_F_COPPER },
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1163 	  "HANKSVILLE LAN Controller",
   1164 	  WM_T_ICH10,		WMP_F_COPPER },
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1166 	  "PCH LAN (82577LM) Controller",
   1167 	  WM_T_PCH,		WMP_F_COPPER },
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1169 	  "PCH LAN (82577LC) Controller",
   1170 	  WM_T_PCH,		WMP_F_COPPER },
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1172 	  "PCH LAN (82578DM) Controller",
   1173 	  WM_T_PCH,		WMP_F_COPPER },
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1175 	  "PCH LAN (82578DC) Controller",
   1176 	  WM_T_PCH,		WMP_F_COPPER },
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1178 	  "PCH2 LAN (82579LM) Controller",
   1179 	  WM_T_PCH2,		WMP_F_COPPER },
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1181 	  "PCH2 LAN (82579V) Controller",
   1182 	  WM_T_PCH2,		WMP_F_COPPER },
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1184 	  "82575EB dual-1000baseT Ethernet",
   1185 	  WM_T_82575,		WMP_F_COPPER },
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1187 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1188 	  WM_T_82575,		WMP_F_SERDES },
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1190 	  "82575GB quad-1000baseT Ethernet",
   1191 	  WM_T_82575,		WMP_F_COPPER },
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1193 	  "82575GB quad-1000baseT Ethernet (PM)",
   1194 	  WM_T_82575,		WMP_F_COPPER },
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1196 	  "82576 1000BaseT Ethernet",
   1197 	  WM_T_82576,		WMP_F_COPPER },
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1199 	  "82576 1000BaseX Ethernet",
   1200 	  WM_T_82576,		WMP_F_FIBER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1203 	  "82576 gigabit Ethernet (SERDES)",
   1204 	  WM_T_82576,		WMP_F_SERDES },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1207 	  "82576 quad-1000BaseT Ethernet",
   1208 	  WM_T_82576,		WMP_F_COPPER },
   1209 
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1211 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1212 	  WM_T_82576,		WMP_F_COPPER },
   1213 
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1215 	  "82576 gigabit Ethernet",
   1216 	  WM_T_82576,		WMP_F_COPPER },
   1217 
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1219 	  "82576 gigabit Ethernet (SERDES)",
   1220 	  WM_T_82576,		WMP_F_SERDES },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1222 	  "82576 quad-gigabit Ethernet (SERDES)",
   1223 	  WM_T_82576,		WMP_F_SERDES },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1226 	  "82580 1000BaseT Ethernet",
   1227 	  WM_T_82580,		WMP_F_COPPER },
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1229 	  "82580 1000BaseX Ethernet",
   1230 	  WM_T_82580,		WMP_F_FIBER },
   1231 
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1233 	  "82580 1000BaseT Ethernet (SERDES)",
   1234 	  WM_T_82580,		WMP_F_SERDES },
   1235 
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1237 	  "82580 gigabit Ethernet (SGMII)",
   1238 	  WM_T_82580,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1240 	  "82580 dual-1000BaseT Ethernet",
   1241 	  WM_T_82580,		WMP_F_COPPER },
   1242 
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1244 	  "82580 quad-1000BaseX Ethernet",
   1245 	  WM_T_82580,		WMP_F_FIBER },
   1246 
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1248 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1249 	  WM_T_82580,		WMP_F_COPPER },
   1250 
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1252 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1253 	  WM_T_82580,		WMP_F_SERDES },
   1254 
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1256 	  "DH89XXCC 1000BASE-KX Ethernet",
   1257 	  WM_T_82580,		WMP_F_SERDES },
   1258 
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1260 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1261 	  WM_T_82580,		WMP_F_SERDES },
   1262 
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1264 	  "I350 Gigabit Network Connection",
   1265 	  WM_T_I350,		WMP_F_COPPER },
   1266 
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1268 	  "I350 Gigabit Fiber Network Connection",
   1269 	  WM_T_I350,		WMP_F_FIBER },
   1270 
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1272 	  "I350 Gigabit Backplane Connection",
   1273 	  WM_T_I350,		WMP_F_SERDES },
   1274 
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1276 	  "I350 Quad Port Gigabit Ethernet",
   1277 	  WM_T_I350,		WMP_F_SERDES },
   1278 
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1280 	  "I350 Gigabit Connection",
   1281 	  WM_T_I350,		WMP_F_COPPER },
   1282 
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1284 	  "I354 Gigabit Ethernet (KX)",
   1285 	  WM_T_I354,		WMP_F_SERDES },
   1286 
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1288 	  "I354 Gigabit Ethernet (SGMII)",
   1289 	  WM_T_I354,		WMP_F_COPPER },
   1290 
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1292 	  "I354 Gigabit Ethernet (2.5G)",
   1293 	  WM_T_I354,		WMP_F_COPPER },
   1294 
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1296 	  "I210-T1 Ethernet Server Adapter",
   1297 	  WM_T_I210,		WMP_F_COPPER },
   1298 
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1300 	  "I210 Ethernet (Copper OEM)",
   1301 	  WM_T_I210,		WMP_F_COPPER },
   1302 
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1304 	  "I210 Ethernet (Copper IT)",
   1305 	  WM_T_I210,		WMP_F_COPPER },
   1306 
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1308 	  "I210 Ethernet (FLASH less)",
   1309 	  WM_T_I210,		WMP_F_COPPER },
   1310 
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1312 	  "I210 Gigabit Ethernet (Fiber)",
   1313 	  WM_T_I210,		WMP_F_FIBER },
   1314 
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1316 	  "I210 Gigabit Ethernet (SERDES)",
   1317 	  WM_T_I210,		WMP_F_SERDES },
   1318 
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1320 	  "I210 Gigabit Ethernet (FLASH less)",
   1321 	  WM_T_I210,		WMP_F_SERDES },
   1322 
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1324 	  "I210 Gigabit Ethernet (SGMII)",
   1325 	  WM_T_I210,		WMP_F_COPPER },
   1326 
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1328 	  "I211 Ethernet (COPPER)",
   1329 	  WM_T_I211,		WMP_F_COPPER },
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1331 	  "I217 V Ethernet Connection",
   1332 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1334 	  "I217 LM Ethernet Connection",
   1335 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1337 	  "I218 V Ethernet Connection",
   1338 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1340 	  "I218 V Ethernet Connection",
   1341 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1343 	  "I218 V Ethernet Connection",
   1344 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1346 	  "I218 LM Ethernet Connection",
   1347 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1349 	  "I218 LM Ethernet Connection",
   1350 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1352 	  "I218 LM Ethernet Connection",
   1353 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1354 #if 0
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1356 	  "I219 V Ethernet Connection",
   1357 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1359 	  "I219 V Ethernet Connection",
   1360 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1362 	  "I219 LM Ethernet Connection",
   1363 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1365 	  "I219 LM Ethernet Connection",
   1366 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1367 #endif
   1368 	{ 0,			0,
   1369 	  NULL,
   1370 	  0,			0 },
   1371 };
   1372 
   1373 /*
   1374  * Register read/write functions.
   1375  * Other than CSR_{READ|WRITE}().
   1376  */
   1377 
   1378 #if 0 /* Not currently used */
   1379 static inline uint32_t
   1380 wm_io_read(struct wm_softc *sc, int reg)
   1381 {
   1382 
   1383 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1384 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1385 }
   1386 #endif
   1387 
   1388 static inline void
   1389 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1390 {
   1391 
   1392 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1393 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1394 }
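
         /*
          * A sketch of how this indirect window is used (illustrative,
          * not a verbatim call site): BAR offset 0 is the address port
          * and offset 4 is the data port, so
          *
          *	wm_io_write(sc, WMREG_CTRL, ctrl);
          *
          * issues two bus cycles, one to latch the register offset and
          * one to move the data.  WMREG_CTRL and ctrl are stand-ins for
          * whatever register and value a workaround actually needs.
          */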
   1395 
   1396 static inline void
   1397 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1398     uint32_t data)
   1399 {
   1400 	uint32_t regval;
   1401 	int i;
   1402 
   1403 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1404 
   1405 	CSR_WRITE(sc, reg, regval);
   1406 
   1407 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1408 		delay(5);
   1409 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1410 			break;
   1411 	}
   1412 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1413 		aprint_error("%s: WARNING:"
   1414 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1415 		    device_xname(sc->sc_dev), reg);
   1416 	}
   1417 }
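
         /*
          * A restatement of the handshake above: the low byte of regval
          * carries the data, the address field selects the 8-bit
          * sub-register, and SCTL_CTL_READY is then polled in 5us steps
          * until the MAC has consumed the write, roughly:
          *
          *	CSR_WRITE(sc, reg, (data & SCTL_CTL_DATA_MASK)
          *	    | (off << SCTL_CTL_ADDR_SHIFT));
          *	while ((CSR_READ(sc, reg) & SCTL_CTL_READY) == 0)
          *		delay(5);
          *
          * except that the real loop gives up after
          * SCTL_CTL_POLL_TIMEOUT iterations instead of spinning forever.
          */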
   1418 
   1419 static inline void
   1420 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1421 {
   1422 	wa->wa_low = htole32(v & 0xffffffffU);
   1423 	if (sizeof(bus_addr_t) == 8)
   1424 		wa->wa_high = htole32((uint64_t) v >> 32);
   1425 	else
   1426 		wa->wa_high = 0;
   1427 }
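
         /*
          * Worked example (sketch): with a 64-bit bus_addr_t, a buffer at
          * v = 0x123456789 is split into
          *
          *	wa->wa_low  = htole32(0x23456789);
          *	wa->wa_high = htole32(0x00000001);
          *
          * while a 32-bit bus_addr_t always yields wa_high == 0; the
          * hardware reads the pair as a little-endian 64-bit address.
          */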
   1428 
   1429 /*
   1430  * Descriptor sync/init functions.
   1431  */
   1432 static inline void
   1433 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1434 {
   1435 	struct wm_softc *sc = txq->txq_sc;
   1436 
   1437 	/* If it will wrap around, sync to the end of the ring. */
   1438 	if ((start + num) > WM_NTXDESC(txq)) {
   1439 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1440 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1441 		    (WM_NTXDESC(txq) - start), ops);
   1442 		num -= (WM_NTXDESC(txq) - start);
   1443 		start = 0;
   1444 	}
   1445 
   1446 	/* Now sync whatever is left. */
   1447 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1448 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1449 }
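
         /*
          * Worked example (sketch): with WM_NTXDESC(txq) == 256, calling
          * wm_cdtxsync(txq, 250, 10, ops) syncs descriptors 250..255
          * (6 entries) in the first bus_dmamap_sync() call, then wraps
          * and syncs 0..3 (the remaining 4), so a range crossing the end
          * of the ring costs exactly two sync calls.
          */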
   1450 
   1451 static inline void
   1452 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1453 {
   1454 	struct wm_softc *sc = rxq->rxq_sc;
   1455 
   1456 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1457 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1458 }
   1459 
   1460 static inline void
   1461 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1462 {
   1463 	struct wm_softc *sc = rxq->rxq_sc;
   1464 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1465 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1466 	struct mbuf *m = rxs->rxs_mbuf;
   1467 
   1468 	/*
   1469 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1470 	 * so that the payload after the Ethernet header is aligned
   1471 	 * to a 4-byte boundary.
    1472 	 *
   1473 	 * XXX BRAINDAMAGE ALERT!
   1474 	 * The stupid chip uses the same size for every buffer, which
   1475 	 * is set in the Receive Control register.  We are using the 2K
   1476 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1477 	 * reason, we can't "scoot" packets longer than the standard
   1478 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1479 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1480 	 * the upper layer copy the headers.
   1481 	 */
   1482 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1483 
   1484 	wm_set_dma_addr(&rxd->wrx_addr,
   1485 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1486 	rxd->wrx_len = 0;
   1487 	rxd->wrx_cksum = 0;
   1488 	rxd->wrx_status = 0;
   1489 	rxd->wrx_errors = 0;
   1490 	rxd->wrx_special = 0;
   1491 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1492 
   1493 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1494 }
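
         /*
          * Illustration of the align tweak above: with sc_align_tweak == 2
          * the 14-byte Ethernet header occupies buffer offsets 2..15, so
          * the IP header starts at offset 16, a 4-byte boundary:
          *
          *	offset: 0  1 | 2 ........ 15 | 16 ...
          *	        pad  | Ethernet hdr  | IP hdr (aligned)
          *
          * With sc_align_tweak == 0 (frames that would exceed 2K - 2 on
          * strict-alignment machines), the upper layer copies the headers
          * instead, as described above.
          */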
   1495 
   1496 /*
   1497  * Device driver interface functions and commonly used functions.
   1498  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1499  */
   1500 
   1501 /* Lookup supported device table */
   1502 static const struct wm_product *
   1503 wm_lookup(const struct pci_attach_args *pa)
   1504 {
   1505 	const struct wm_product *wmp;
   1506 
   1507 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1508 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1509 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1510 			return wmp;
   1511 	}
   1512 	return NULL;
   1513 }
   1514 
   1515 /* The match function (ca_match) */
   1516 static int
   1517 wm_match(device_t parent, cfdata_t cf, void *aux)
   1518 {
   1519 	struct pci_attach_args *pa = aux;
   1520 
   1521 	if (wm_lookup(pa) != NULL)
   1522 		return 1;
   1523 
   1524 	return 0;
   1525 }
   1526 
   1527 /* The attach function (ca_attach) */
   1528 static void
   1529 wm_attach(device_t parent, device_t self, void *aux)
   1530 {
   1531 	struct wm_softc *sc = device_private(self);
   1532 	struct pci_attach_args *pa = aux;
   1533 	prop_dictionary_t dict;
   1534 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1535 	pci_chipset_tag_t pc = pa->pa_pc;
   1536 	int counts[PCI_INTR_TYPE_SIZE];
   1537 	pci_intr_type_t max_type;
   1538 	const char *eetype, *xname;
   1539 	bus_space_tag_t memt;
   1540 	bus_space_handle_t memh;
   1541 	bus_size_t memsize;
   1542 	int memh_valid;
   1543 	int i, error;
   1544 	const struct wm_product *wmp;
   1545 	prop_data_t ea;
   1546 	prop_number_t pn;
   1547 	uint8_t enaddr[ETHER_ADDR_LEN];
   1548 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1549 	pcireg_t preg, memtype;
   1550 	uint16_t eeprom_data, apme_mask;
   1551 	bool force_clear_smbi;
   1552 	uint32_t link_mode;
   1553 	uint32_t reg;
   1554 
   1555 	sc->sc_dev = self;
   1556 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1557 	sc->sc_stopping = false;
   1558 
   1559 	wmp = wm_lookup(pa);
   1560 #ifdef DIAGNOSTIC
   1561 	if (wmp == NULL) {
   1562 		printf("\n");
   1563 		panic("wm_attach: impossible");
   1564 	}
   1565 #endif
   1566 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1567 
   1568 	sc->sc_pc = pa->pa_pc;
   1569 	sc->sc_pcitag = pa->pa_tag;
   1570 
   1571 	if (pci_dma64_available(pa))
   1572 		sc->sc_dmat = pa->pa_dmat64;
   1573 	else
   1574 		sc->sc_dmat = pa->pa_dmat;
   1575 
   1576 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1577 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1578 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1579 
   1580 	sc->sc_type = wmp->wmp_type;
   1581 	if (sc->sc_type < WM_T_82543) {
   1582 		if (sc->sc_rev < 2) {
   1583 			aprint_error_dev(sc->sc_dev,
   1584 			    "i82542 must be at least rev. 2\n");
   1585 			return;
   1586 		}
   1587 		if (sc->sc_rev < 3)
   1588 			sc->sc_type = WM_T_82542_2_0;
   1589 	}
   1590 
   1591 	/*
   1592 	 * Disable MSI for Errata:
   1593 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1594 	 *
   1595 	 *  82544: Errata 25
   1596 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1597 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1598 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1599 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1600 	 *
   1601 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1602 	 *
   1603 	 *  82571 & 82572: Errata 63
   1604 	 */
   1605 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1606 	    || (sc->sc_type == WM_T_82572))
   1607 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1608 
   1609 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1610 	    || (sc->sc_type == WM_T_82580)
   1611 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1612 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1613 		sc->sc_flags |= WM_F_NEWQUEUE;
   1614 
   1615 	/* Set device properties (mactype) */
   1616 	dict = device_properties(sc->sc_dev);
   1617 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1618 
   1619 	/*
    1620 	 * Map the device.  All devices support memory-mapped access,
   1621 	 * and it is really required for normal operation.
   1622 	 */
   1623 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1624 	switch (memtype) {
   1625 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1626 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1627 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1628 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1629 		break;
   1630 	default:
   1631 		memh_valid = 0;
   1632 		break;
   1633 	}
   1634 
   1635 	if (memh_valid) {
   1636 		sc->sc_st = memt;
   1637 		sc->sc_sh = memh;
   1638 		sc->sc_ss = memsize;
   1639 	} else {
   1640 		aprint_error_dev(sc->sc_dev,
   1641 		    "unable to map device registers\n");
   1642 		return;
   1643 	}
   1644 
   1645 	/*
   1646 	 * In addition, i82544 and later support I/O mapped indirect
   1647 	 * register access.  It is not desirable (nor supported in
   1648 	 * this driver) to use it for normal operation, though it is
   1649 	 * required to work around bugs in some chip versions.
   1650 	 */
   1651 	if (sc->sc_type >= WM_T_82544) {
   1652 		/* First we have to find the I/O BAR. */
   1653 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1654 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1655 			if (memtype == PCI_MAPREG_TYPE_IO)
   1656 				break;
   1657 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1658 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1659 				i += 4;	/* skip high bits, too */
   1660 		}
   1661 		if (i < PCI_MAPREG_END) {
   1662 			/*
    1663 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1664 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1665 			 * That's no problem, because the newer chips don't
    1666 			 * have this bug.
    1667 			 *
    1668 			 * The i8254x apparently doesn't respond when the
    1669 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1670 			 * been configured.
   1671 			 */
   1672 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1673 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1674 				aprint_error_dev(sc->sc_dev,
   1675 				    "WARNING: I/O BAR at zero.\n");
   1676 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1677 					0, &sc->sc_iot, &sc->sc_ioh,
   1678 					NULL, &sc->sc_ios) == 0) {
   1679 				sc->sc_flags |= WM_F_IOH_VALID;
   1680 			} else {
   1681 				aprint_error_dev(sc->sc_dev,
   1682 				    "WARNING: unable to map I/O space\n");
   1683 			}
   1684 		}
    1686 	}
   1687 
   1688 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1689 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1690 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1691 	if (sc->sc_type < WM_T_82542_2_1)
   1692 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1693 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1694 
   1695 	/* power up chip */
   1696 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1697 	    NULL)) && error != EOPNOTSUPP) {
   1698 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1699 		return;
   1700 	}
   1701 
   1702 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1703 
   1704 	/* Allocation settings */
   1705 	max_type = PCI_INTR_TYPE_MSIX;
   1706 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1707 	counts[PCI_INTR_TYPE_MSI] = 1;
   1708 	counts[PCI_INTR_TYPE_INTX] = 1;
   1709 
   1710 alloc_retry:
   1711 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1712 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1713 		return;
   1714 	}
   1715 
   1716 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1717 		error = wm_setup_msix(sc);
   1718 		if (error) {
   1719 			pci_intr_release(pc, sc->sc_intrs,
   1720 			    counts[PCI_INTR_TYPE_MSIX]);
   1721 
   1722 			/* Setup for MSI: Disable MSI-X */
   1723 			max_type = PCI_INTR_TYPE_MSI;
   1724 			counts[PCI_INTR_TYPE_MSI] = 1;
   1725 			counts[PCI_INTR_TYPE_INTX] = 1;
   1726 			goto alloc_retry;
   1727 		}
    1728 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1729 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1730 		error = wm_setup_legacy(sc);
   1731 		if (error) {
   1732 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1733 			    counts[PCI_INTR_TYPE_MSI]);
   1734 
   1735 			/* The next try is for INTx: Disable MSI */
   1736 			max_type = PCI_INTR_TYPE_INTX;
   1737 			counts[PCI_INTR_TYPE_INTX] = 1;
   1738 			goto alloc_retry;
   1739 		}
   1740 	} else {
   1741 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1742 		error = wm_setup_legacy(sc);
   1743 		if (error) {
   1744 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1745 			    counts[PCI_INTR_TYPE_INTX]);
   1746 			return;
   1747 		}
   1748 	}
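
         	/*
         	 * To restate the ladder above (a summary, not new logic):
         	 * MSI-X is tried first with one vector per queue plus one
         	 * extra (the "+ 1" in the MSI-X count); if wm_setup_msix()
         	 * fails, those vectors are released and the allocation is
         	 * retried with plain MSI, and if that fails too, with a
         	 * single INTx line and a single queue.
         	 */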
   1749 
   1750 	/*
   1751 	 * Check the function ID (unit number of the chip).
   1752 	 */
   1753 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1754 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1755 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1756 	    || (sc->sc_type == WM_T_82580)
   1757 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1758 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1759 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1760 	else
   1761 		sc->sc_funcid = 0;
   1762 
   1763 	/*
   1764 	 * Determine a few things about the bus we're connected to.
   1765 	 */
   1766 	if (sc->sc_type < WM_T_82543) {
   1767 		/* We don't really know the bus characteristics here. */
   1768 		sc->sc_bus_speed = 33;
   1769 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1770 		/*
    1771 		 * CSA (Communication Streaming Architecture) is about as fast
    1772 		 * as a 32-bit, 66MHz PCI bus.
   1773 		 */
   1774 		sc->sc_flags |= WM_F_CSA;
   1775 		sc->sc_bus_speed = 66;
   1776 		aprint_verbose_dev(sc->sc_dev,
   1777 		    "Communication Streaming Architecture\n");
   1778 		if (sc->sc_type == WM_T_82547) {
   1779 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1780 			callout_setfunc(&sc->sc_txfifo_ch,
   1781 					wm_82547_txfifo_stall, sc);
   1782 			aprint_verbose_dev(sc->sc_dev,
   1783 			    "using 82547 Tx FIFO stall work-around\n");
   1784 		}
   1785 	} else if (sc->sc_type >= WM_T_82571) {
   1786 		sc->sc_flags |= WM_F_PCIE;
   1787 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1788 		    && (sc->sc_type != WM_T_ICH10)
   1789 		    && (sc->sc_type != WM_T_PCH)
   1790 		    && (sc->sc_type != WM_T_PCH2)
   1791 		    && (sc->sc_type != WM_T_PCH_LPT)
   1792 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1793 			/* ICH* and PCH* have no PCIe capability registers */
   1794 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1795 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1796 				NULL) == 0)
   1797 				aprint_error_dev(sc->sc_dev,
   1798 				    "unable to find PCIe capability\n");
   1799 		}
   1800 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1801 	} else {
   1802 		reg = CSR_READ(sc, WMREG_STATUS);
   1803 		if (reg & STATUS_BUS64)
   1804 			sc->sc_flags |= WM_F_BUS64;
   1805 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1806 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1807 
   1808 			sc->sc_flags |= WM_F_PCIX;
   1809 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1810 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1811 				aprint_error_dev(sc->sc_dev,
   1812 				    "unable to find PCIX capability\n");
   1813 			else if (sc->sc_type != WM_T_82545_3 &&
   1814 				 sc->sc_type != WM_T_82546_3) {
   1815 				/*
   1816 				 * Work around a problem caused by the BIOS
   1817 				 * setting the max memory read byte count
   1818 				 * incorrectly.
   1819 				 */
   1820 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1821 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1822 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1823 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1824 
   1825 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1826 				    PCIX_CMD_BYTECNT_SHIFT;
   1827 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1828 				    PCIX_STATUS_MAXB_SHIFT;
   1829 				if (bytecnt > maxb) {
   1830 					aprint_verbose_dev(sc->sc_dev,
   1831 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1832 					    512 << bytecnt, 512 << maxb);
   1833 					pcix_cmd = (pcix_cmd &
   1834 					    ~PCIX_CMD_BYTECNT_MASK) |
   1835 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1836 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1837 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1838 					    pcix_cmd);
   1839 				}
   1840 			}
   1841 		}
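
         		/*
         		 * Worked example for the MMRBC fix above: the PCI-X
         		 * command field encodes the max memory read byte count
         		 * as 512 << bytecnt, so bytecnt == 3 requests 4096
         		 * bytes; if the status register only advertises
         		 * maxb == 1 (1024 bytes), the command field is
         		 * rewritten down to that advertised maximum.
         		 */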
   1842 		/*
   1843 		 * The quad port adapter is special; it has a PCIX-PCIX
   1844 		 * bridge on the board, and can run the secondary bus at
   1845 		 * a higher speed.
   1846 		 */
   1847 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1848 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1849 								      : 66;
   1850 		} else if (sc->sc_flags & WM_F_PCIX) {
   1851 			switch (reg & STATUS_PCIXSPD_MASK) {
   1852 			case STATUS_PCIXSPD_50_66:
   1853 				sc->sc_bus_speed = 66;
   1854 				break;
   1855 			case STATUS_PCIXSPD_66_100:
   1856 				sc->sc_bus_speed = 100;
   1857 				break;
   1858 			case STATUS_PCIXSPD_100_133:
   1859 				sc->sc_bus_speed = 133;
   1860 				break;
   1861 			default:
   1862 				aprint_error_dev(sc->sc_dev,
   1863 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1864 				    reg & STATUS_PCIXSPD_MASK);
   1865 				sc->sc_bus_speed = 66;
   1866 				break;
   1867 			}
   1868 		} else
   1869 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1870 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1871 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1872 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1873 	}
   1874 
   1875 	/* clear interesting stat counters */
   1876 	CSR_READ(sc, WMREG_COLC);
   1877 	CSR_READ(sc, WMREG_RXERRC);
   1878 
   1879 	/* get PHY control from SMBus to PCIe */
   1880 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1881 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1882 		wm_smbustopci(sc);
   1883 
   1884 	/* Reset the chip to a known state. */
   1885 	wm_reset(sc);
   1886 
   1887 	/* Get some information about the EEPROM. */
   1888 	switch (sc->sc_type) {
   1889 	case WM_T_82542_2_0:
   1890 	case WM_T_82542_2_1:
   1891 	case WM_T_82543:
   1892 	case WM_T_82544:
   1893 		/* Microwire */
   1894 		sc->sc_nvm_wordsize = 64;
   1895 		sc->sc_nvm_addrbits = 6;
   1896 		break;
   1897 	case WM_T_82540:
   1898 	case WM_T_82545:
   1899 	case WM_T_82545_3:
   1900 	case WM_T_82546:
   1901 	case WM_T_82546_3:
   1902 		/* Microwire */
   1903 		reg = CSR_READ(sc, WMREG_EECD);
   1904 		if (reg & EECD_EE_SIZE) {
   1905 			sc->sc_nvm_wordsize = 256;
   1906 			sc->sc_nvm_addrbits = 8;
   1907 		} else {
   1908 			sc->sc_nvm_wordsize = 64;
   1909 			sc->sc_nvm_addrbits = 6;
   1910 		}
   1911 		sc->sc_flags |= WM_F_LOCK_EECD;
   1912 		break;
   1913 	case WM_T_82541:
   1914 	case WM_T_82541_2:
   1915 	case WM_T_82547:
   1916 	case WM_T_82547_2:
   1917 		sc->sc_flags |= WM_F_LOCK_EECD;
   1918 		reg = CSR_READ(sc, WMREG_EECD);
   1919 		if (reg & EECD_EE_TYPE) {
   1920 			/* SPI */
   1921 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1922 			wm_nvm_set_addrbits_size_eecd(sc);
   1923 		} else {
   1924 			/* Microwire */
   1925 			if ((reg & EECD_EE_ABITS) != 0) {
   1926 				sc->sc_nvm_wordsize = 256;
   1927 				sc->sc_nvm_addrbits = 8;
   1928 			} else {
   1929 				sc->sc_nvm_wordsize = 64;
   1930 				sc->sc_nvm_addrbits = 6;
   1931 			}
   1932 		}
   1933 		break;
   1934 	case WM_T_82571:
   1935 	case WM_T_82572:
   1936 		/* SPI */
   1937 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1938 		wm_nvm_set_addrbits_size_eecd(sc);
   1939 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1940 		break;
   1941 	case WM_T_82573:
   1942 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1943 		/* FALLTHROUGH */
   1944 	case WM_T_82574:
   1945 	case WM_T_82583:
   1946 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1947 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1948 			sc->sc_nvm_wordsize = 2048;
   1949 		} else {
   1950 			/* SPI */
   1951 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1952 			wm_nvm_set_addrbits_size_eecd(sc);
   1953 		}
   1954 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1955 		break;
   1956 	case WM_T_82575:
   1957 	case WM_T_82576:
   1958 	case WM_T_82580:
   1959 	case WM_T_I350:
   1960 	case WM_T_I354:
   1961 	case WM_T_80003:
   1962 		/* SPI */
   1963 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1964 		wm_nvm_set_addrbits_size_eecd(sc);
   1965 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1966 		    | WM_F_LOCK_SWSM;
   1967 		break;
   1968 	case WM_T_ICH8:
   1969 	case WM_T_ICH9:
   1970 	case WM_T_ICH10:
   1971 	case WM_T_PCH:
   1972 	case WM_T_PCH2:
   1973 	case WM_T_PCH_LPT:
   1974 		/* FLASH */
   1975 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1976 		sc->sc_nvm_wordsize = 2048;
   1977 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   1978 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1979 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1980 			aprint_error_dev(sc->sc_dev,
   1981 			    "can't map FLASH registers\n");
   1982 			goto out;
   1983 		}
   1984 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1985 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1986 		    ICH_FLASH_SECTOR_SIZE;
   1987 		sc->sc_ich8_flash_bank_size =
   1988 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1989 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   1990 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1991 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1992 		sc->sc_flashreg_offset = 0;
   1993 		break;
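
         		/*
         		 * A worked example of the GFPREG math above, with an
         		 * illustrative value: if GFPREG reads 0x001f0001, the
         		 * flash base is 1 * ICH_FLASH_SECTOR_SIZE and the
         		 * region spans (0x1f + 1) - 1 = 0x1f sectors; dividing
         		 * that byte count by 2 (banks) and by sizeof(uint16_t)
         		 * gives the per-bank size in 16-bit words.
         		 */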
   1994 	case WM_T_PCH_SPT:
   1995 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   1996 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1997 		sc->sc_flasht = sc->sc_st;
   1998 		sc->sc_flashh = sc->sc_sh;
   1999 		sc->sc_ich8_flash_base = 0;
   2000 		sc->sc_nvm_wordsize =
   2001 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2002 			* NVM_SIZE_MULTIPLIER;
    2003 		/* The size is in bytes; we want words */
   2004 		sc->sc_nvm_wordsize /= 2;
   2005 		/* assume 2 banks */
   2006 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2007 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2008 		break;
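
         		/*
         		 * Worked example for the sizing above (illustrative
         		 * number): if STRAP bits [5:1] read 0x1f, the NVM is
         		 * (0x1f + 1) * NVM_SIZE_MULTIPLIER bytes, halved to
         		 * count 16-bit words and halved again for the assumed
         		 * two banks.
         		 */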
   2009 	case WM_T_I210:
   2010 	case WM_T_I211:
   2011 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2012 			wm_nvm_set_addrbits_size_eecd(sc);
   2013 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2014 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   2015 		} else {
   2016 			sc->sc_nvm_wordsize = INVM_SIZE;
   2017 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2018 			sc->sc_flags |= WM_F_LOCK_SWFW;
   2019 		}
   2020 		break;
   2021 	default:
   2022 		break;
   2023 	}
   2024 
    2025 	/* Ensure the SMBI bit is clear before the first NVM or PHY access */
   2026 	switch (sc->sc_type) {
   2027 	case WM_T_82571:
   2028 	case WM_T_82572:
   2029 		reg = CSR_READ(sc, WMREG_SWSM2);
   2030 		if ((reg & SWSM2_LOCK) == 0) {
   2031 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2032 			force_clear_smbi = true;
   2033 		} else
   2034 			force_clear_smbi = false;
   2035 		break;
   2036 	case WM_T_82573:
   2037 	case WM_T_82574:
   2038 	case WM_T_82583:
   2039 		force_clear_smbi = true;
   2040 		break;
   2041 	default:
   2042 		force_clear_smbi = false;
   2043 		break;
   2044 	}
   2045 	if (force_clear_smbi) {
   2046 		reg = CSR_READ(sc, WMREG_SWSM);
   2047 		if ((reg & SWSM_SMBI) != 0)
   2048 			aprint_error_dev(sc->sc_dev,
   2049 			    "Please update the Bootagent\n");
   2050 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2051 	}
   2052 
   2053 	/*
    2054 	 * Defer printing the EEPROM type until after verifying the checksum.
   2055 	 * This allows the EEPROM type to be printed correctly in the case
   2056 	 * that no EEPROM is attached.
   2057 	 */
   2058 	/*
   2059 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2060 	 * this for later, so we can fail future reads from the EEPROM.
   2061 	 */
   2062 	if (wm_nvm_validate_checksum(sc)) {
   2063 		/*
    2064 		 * Validate again, because some PCI-e parts fail the
    2065 		 * first check due to the link being in a sleep state.
   2066 		 */
   2067 		if (wm_nvm_validate_checksum(sc))
   2068 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2069 	}
   2070 
   2071 	/* Set device properties (macflags) */
   2072 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2073 
   2074 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2075 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2076 	else {
   2077 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2078 		    sc->sc_nvm_wordsize);
   2079 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2080 			aprint_verbose("iNVM");
   2081 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2082 			aprint_verbose("FLASH(HW)");
   2083 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2084 			aprint_verbose("FLASH");
   2085 		else {
   2086 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2087 				eetype = "SPI";
   2088 			else
   2089 				eetype = "MicroWire";
   2090 			aprint_verbose("(%d address bits) %s EEPROM",
   2091 			    sc->sc_nvm_addrbits, eetype);
   2092 		}
   2093 	}
   2094 	wm_nvm_version(sc);
   2095 	aprint_verbose("\n");
   2096 
   2097 	/* Check for I21[01] PLL workaround */
   2098 	if (sc->sc_type == WM_T_I210)
   2099 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2100 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2101 		/* NVM image release 3.25 has a workaround */
   2102 		if ((sc->sc_nvm_ver_major < 3)
   2103 		    || ((sc->sc_nvm_ver_major == 3)
   2104 			&& (sc->sc_nvm_ver_minor < 25))) {
   2105 			aprint_verbose_dev(sc->sc_dev,
   2106 			    "ROM image version %d.%d is older than 3.25\n",
   2107 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2108 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2109 		}
   2110 	}
   2111 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2112 		wm_pll_workaround_i210(sc);
   2113 
   2114 	wm_get_wakeup(sc);
   2115 	switch (sc->sc_type) {
   2116 	case WM_T_82571:
   2117 	case WM_T_82572:
   2118 	case WM_T_82573:
   2119 	case WM_T_82574:
   2120 	case WM_T_82583:
   2121 	case WM_T_80003:
   2122 	case WM_T_ICH8:
   2123 	case WM_T_ICH9:
   2124 	case WM_T_ICH10:
   2125 	case WM_T_PCH:
   2126 	case WM_T_PCH2:
   2127 	case WM_T_PCH_LPT:
   2128 	case WM_T_PCH_SPT:
   2129 		/* Non-AMT based hardware can now take control from firmware */
   2130 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2131 			wm_get_hw_control(sc);
   2132 		break;
   2133 	default:
   2134 		break;
   2135 	}
   2136 
   2137 	/*
    2138 	 * Read the Ethernet address from the EEPROM, unless it was
    2139 	 * already found in the device properties.
   2140 	 */
   2141 	ea = prop_dictionary_get(dict, "mac-address");
   2142 	if (ea != NULL) {
   2143 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2144 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2145 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2146 	} else {
   2147 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2148 			aprint_error_dev(sc->sc_dev,
   2149 			    "unable to read Ethernet address\n");
   2150 			goto out;
   2151 		}
   2152 	}
   2153 
   2154 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2155 	    ether_sprintf(enaddr));
   2156 
   2157 	/*
   2158 	 * Read the config info from the EEPROM, and set up various
   2159 	 * bits in the control registers based on their contents.
   2160 	 */
   2161 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2162 	if (pn != NULL) {
   2163 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2164 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2165 	} else {
   2166 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2167 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2168 			goto out;
   2169 		}
   2170 	}
   2171 
   2172 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2173 	if (pn != NULL) {
   2174 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2175 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2176 	} else {
   2177 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2178 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2179 			goto out;
   2180 		}
   2181 	}
   2182 
   2183 	/* check for WM_F_WOL */
   2184 	switch (sc->sc_type) {
   2185 	case WM_T_82542_2_0:
   2186 	case WM_T_82542_2_1:
   2187 	case WM_T_82543:
   2188 		/* dummy? */
   2189 		eeprom_data = 0;
   2190 		apme_mask = NVM_CFG3_APME;
   2191 		break;
   2192 	case WM_T_82544:
   2193 		apme_mask = NVM_CFG2_82544_APM_EN;
   2194 		eeprom_data = cfg2;
   2195 		break;
   2196 	case WM_T_82546:
   2197 	case WM_T_82546_3:
   2198 	case WM_T_82571:
   2199 	case WM_T_82572:
   2200 	case WM_T_82573:
   2201 	case WM_T_82574:
   2202 	case WM_T_82583:
   2203 	case WM_T_80003:
   2204 	default:
   2205 		apme_mask = NVM_CFG3_APME;
   2206 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2207 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2208 		break;
   2209 	case WM_T_82575:
   2210 	case WM_T_82576:
   2211 	case WM_T_82580:
   2212 	case WM_T_I350:
   2213 	case WM_T_I354: /* XXX ok? */
   2214 	case WM_T_ICH8:
   2215 	case WM_T_ICH9:
   2216 	case WM_T_ICH10:
   2217 	case WM_T_PCH:
   2218 	case WM_T_PCH2:
   2219 	case WM_T_PCH_LPT:
   2220 	case WM_T_PCH_SPT:
   2221 		/* XXX The funcid should be checked on some devices */
   2222 		apme_mask = WUC_APME;
   2223 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2224 		break;
   2225 	}
   2226 
   2227 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2228 	if ((eeprom_data & apme_mask) != 0)
   2229 		sc->sc_flags |= WM_F_WOL;
   2230 #ifdef WM_DEBUG
   2231 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2232 		printf("WOL\n");
   2233 #endif
   2234 
   2235 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2236 		/* Check NVM for autonegotiation */
   2237 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2238 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2239 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2240 		}
   2241 	}
   2242 
   2243 	/*
    2244 	 * XXX Some multi-port cards need special handling to
    2245 	 * disable a particular port.
   2246 	 */
   2247 
   2248 	if (sc->sc_type >= WM_T_82544) {
   2249 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2250 		if (pn != NULL) {
   2251 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2252 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2253 		} else {
   2254 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2255 				aprint_error_dev(sc->sc_dev,
   2256 				    "unable to read SWDPIN\n");
   2257 				goto out;
   2258 			}
   2259 		}
   2260 	}
   2261 
   2262 	if (cfg1 & NVM_CFG1_ILOS)
   2263 		sc->sc_ctrl |= CTRL_ILOS;
   2264 
   2265 	/*
   2266 	 * XXX
    2267 	 * This code isn't correct because pins 2 and 3 are located
    2268 	 * in different positions on newer chips. Check all datasheets.
    2269 	 *
    2270 	 * Until this is resolved, only handle chips up to the 82580.
   2271 	 */
   2272 	if (sc->sc_type <= WM_T_82580) {
   2273 		if (sc->sc_type >= WM_T_82544) {
   2274 			sc->sc_ctrl |=
   2275 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2276 			    CTRL_SWDPIO_SHIFT;
   2277 			sc->sc_ctrl |=
   2278 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2279 			    CTRL_SWDPINS_SHIFT;
   2280 		} else {
   2281 			sc->sc_ctrl |=
   2282 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2283 			    CTRL_SWDPIO_SHIFT;
   2284 		}
   2285 	}
   2286 
   2287 	/* XXX For other than 82580? */
   2288 	if (sc->sc_type == WM_T_82580) {
   2289 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2290 		if (nvmword & __BIT(13))
   2291 			sc->sc_ctrl |= CTRL_ILOS;
   2292 	}
   2293 
   2294 #if 0
   2295 	if (sc->sc_type >= WM_T_82544) {
   2296 		if (cfg1 & NVM_CFG1_IPS0)
   2297 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2298 		if (cfg1 & NVM_CFG1_IPS1)
   2299 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2300 		sc->sc_ctrl_ext |=
   2301 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2302 		    CTRL_EXT_SWDPIO_SHIFT;
   2303 		sc->sc_ctrl_ext |=
   2304 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2305 		    CTRL_EXT_SWDPINS_SHIFT;
   2306 	} else {
   2307 		sc->sc_ctrl_ext |=
   2308 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2309 		    CTRL_EXT_SWDPIO_SHIFT;
   2310 	}
   2311 #endif
   2312 
   2313 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2314 #if 0
   2315 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2316 #endif
   2317 
   2318 	if (sc->sc_type == WM_T_PCH) {
   2319 		uint16_t val;
   2320 
   2321 		/* Save the NVM K1 bit setting */
   2322 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2323 
   2324 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2325 			sc->sc_nvm_k1_enabled = 1;
   2326 		else
   2327 			sc->sc_nvm_k1_enabled = 0;
   2328 	}
   2329 
   2330 	/*
    2331 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2332 	 * media structures accordingly.
   2333 	 */
   2334 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2335 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2336 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2337 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2338 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2339 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2340 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2341 	} else if (sc->sc_type < WM_T_82543 ||
   2342 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2343 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2344 			aprint_error_dev(sc->sc_dev,
   2345 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2346 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2347 		}
   2348 		wm_tbi_mediainit(sc);
   2349 	} else {
   2350 		switch (sc->sc_type) {
   2351 		case WM_T_82575:
   2352 		case WM_T_82576:
   2353 		case WM_T_82580:
   2354 		case WM_T_I350:
   2355 		case WM_T_I354:
   2356 		case WM_T_I210:
   2357 		case WM_T_I211:
   2358 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2359 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2360 			switch (link_mode) {
   2361 			case CTRL_EXT_LINK_MODE_1000KX:
   2362 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2363 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2364 				break;
   2365 			case CTRL_EXT_LINK_MODE_SGMII:
   2366 				if (wm_sgmii_uses_mdio(sc)) {
   2367 					aprint_verbose_dev(sc->sc_dev,
   2368 					    "SGMII(MDIO)\n");
   2369 					sc->sc_flags |= WM_F_SGMII;
   2370 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2371 					break;
   2372 				}
   2373 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2374 				/*FALLTHROUGH*/
   2375 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2376 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2377 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2378 					if (link_mode
   2379 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2380 						sc->sc_mediatype
   2381 						    = WM_MEDIATYPE_COPPER;
   2382 						sc->sc_flags |= WM_F_SGMII;
   2383 					} else {
   2384 						sc->sc_mediatype
   2385 						    = WM_MEDIATYPE_SERDES;
   2386 						aprint_verbose_dev(sc->sc_dev,
   2387 						    "SERDES\n");
   2388 					}
   2389 					break;
   2390 				}
   2391 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2392 					aprint_verbose_dev(sc->sc_dev,
   2393 					    "SERDES\n");
   2394 
   2395 				/* Change current link mode setting */
   2396 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2397 				switch (sc->sc_mediatype) {
   2398 				case WM_MEDIATYPE_COPPER:
   2399 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2400 					break;
   2401 				case WM_MEDIATYPE_SERDES:
   2402 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2403 					break;
   2404 				default:
   2405 					break;
   2406 				}
   2407 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2408 				break;
   2409 			case CTRL_EXT_LINK_MODE_GMII:
   2410 			default:
   2411 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2412 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2413 				break;
   2414 			}
   2415 
    2417 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2418 				reg |= CTRL_EXT_I2C_ENA;
    2419 			else
    2420 				reg &= ~CTRL_EXT_I2C_ENA;
   2421 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2422 
   2423 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2424 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2425 			else
   2426 				wm_tbi_mediainit(sc);
   2427 			break;
   2428 		default:
   2429 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2430 				aprint_error_dev(sc->sc_dev,
   2431 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2432 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2433 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2434 		}
   2435 	}
   2436 
   2437 	ifp = &sc->sc_ethercom.ec_if;
   2438 	xname = device_xname(sc->sc_dev);
   2439 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2440 	ifp->if_softc = sc;
   2441 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2442 	ifp->if_extflags = IFEF_START_MPSAFE;
   2443 	ifp->if_ioctl = wm_ioctl;
   2444 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2445 		ifp->if_start = wm_nq_start;
   2446 		if (sc->sc_nqueues > 1)
   2447 			ifp->if_transmit = wm_nq_transmit;
   2448 	} else
   2449 		ifp->if_start = wm_start;
   2450 	ifp->if_watchdog = wm_watchdog;
   2451 	ifp->if_init = wm_init;
   2452 	ifp->if_stop = wm_stop;
   2453 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2454 	IFQ_SET_READY(&ifp->if_snd);
   2455 
   2456 	/* Check for jumbo frame */
   2457 	switch (sc->sc_type) {
   2458 	case WM_T_82573:
   2459 		/* XXX limited to 9234 if ASPM is disabled */
   2460 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2461 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2462 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2463 		break;
   2464 	case WM_T_82571:
   2465 	case WM_T_82572:
   2466 	case WM_T_82574:
   2467 	case WM_T_82575:
   2468 	case WM_T_82576:
   2469 	case WM_T_82580:
   2470 	case WM_T_I350:
   2471 	case WM_T_I354: /* XXXX ok? */
   2472 	case WM_T_I210:
   2473 	case WM_T_I211:
   2474 	case WM_T_80003:
   2475 	case WM_T_ICH9:
   2476 	case WM_T_ICH10:
   2477 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2478 	case WM_T_PCH_LPT:
   2479 	case WM_T_PCH_SPT:
   2480 		/* XXX limited to 9234 */
   2481 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2482 		break;
   2483 	case WM_T_PCH:
   2484 		/* XXX limited to 4096 */
   2485 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2486 		break;
   2487 	case WM_T_82542_2_0:
   2488 	case WM_T_82542_2_1:
   2489 	case WM_T_82583:
   2490 	case WM_T_ICH8:
   2491 		/* No support for jumbo frame */
   2492 		break;
   2493 	default:
   2494 		/* ETHER_MAX_LEN_JUMBO */
   2495 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2496 		break;
   2497 	}
   2498 
   2499 	/* If we're a i82543 or greater, we can support VLANs. */
   2500 	if (sc->sc_type >= WM_T_82543)
   2501 		sc->sc_ethercom.ec_capabilities |=
   2502 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2503 
   2504 	/*
    2505 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2506 	 * on i82543 and later.
   2507 	 */
   2508 	if (sc->sc_type >= WM_T_82543) {
   2509 		ifp->if_capabilities |=
   2510 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2511 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2512 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2513 		    IFCAP_CSUM_TCPv6_Tx |
   2514 		    IFCAP_CSUM_UDPv6_Tx;
   2515 	}
   2516 
   2517 	/*
    2518 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2519 	 *
   2520 	 *	82541GI (8086:1076) ... no
   2521 	 *	82572EI (8086:10b9) ... yes
   2522 	 */
   2523 	if (sc->sc_type >= WM_T_82571) {
   2524 		ifp->if_capabilities |=
   2525 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2526 	}
   2527 
   2528 	/*
   2529 	 * If we're a i82544 or greater (except i82547), we can do
   2530 	 * TCP segmentation offload.
   2531 	 */
   2532 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2533 		ifp->if_capabilities |= IFCAP_TSOv4;
   2534 	}
   2535 
   2536 	if (sc->sc_type >= WM_T_82571) {
   2537 		ifp->if_capabilities |= IFCAP_TSOv6;
   2538 	}
   2539 
   2540 #ifdef WM_MPSAFE
   2541 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2542 #else
   2543 	sc->sc_core_lock = NULL;
   2544 #endif
   2545 
   2546 	/* Attach the interface. */
   2547 	if_initialize(ifp);
   2548 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2549 	ether_ifattach(ifp, enaddr);
   2550 	if_register(ifp);
   2551 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2552 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2553 			  RND_FLAG_DEFAULT);
   2554 
   2555 #ifdef WM_EVENT_COUNTERS
   2556 	/* Attach event counters. */
   2557 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2558 	    NULL, xname, "linkintr");
   2559 
   2560 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2561 	    NULL, xname, "tx_xoff");
   2562 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2563 	    NULL, xname, "tx_xon");
   2564 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2565 	    NULL, xname, "rx_xoff");
   2566 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2567 	    NULL, xname, "rx_xon");
   2568 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2569 	    NULL, xname, "rx_macctl");
   2570 #endif /* WM_EVENT_COUNTERS */
   2571 
   2572 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2573 		pmf_class_network_register(self, ifp);
   2574 	else
   2575 		aprint_error_dev(self, "couldn't establish power handler\n");
   2576 
   2577 	sc->sc_flags |= WM_F_ATTACHED;
   2578  out:
   2579 	return;
   2580 }
   2581 
   2582 /* The detach function (ca_detach) */
   2583 static int
   2584 wm_detach(device_t self, int flags __unused)
   2585 {
   2586 	struct wm_softc *sc = device_private(self);
   2587 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2588 	int i;
   2589 
   2590 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2591 		return 0;
   2592 
    2593 	/* Stop the interface. The callouts are stopped in wm_stop(). */
   2594 	wm_stop(ifp, 1);
   2595 
   2596 	pmf_device_deregister(self);
   2597 
   2598 	/* Tell the firmware about the release */
   2599 	WM_CORE_LOCK(sc);
   2600 	wm_release_manageability(sc);
   2601 	wm_release_hw_control(sc);
   2602 	WM_CORE_UNLOCK(sc);
   2603 
   2604 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2605 
   2606 	/* Delete all remaining media. */
   2607 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2608 
   2609 	ether_ifdetach(ifp);
   2610 	if_detach(ifp);
   2611 	if_percpuq_destroy(sc->sc_ipq);
   2612 
   2613 	/* Unload RX dmamaps and free mbufs */
   2614 	for (i = 0; i < sc->sc_nqueues; i++) {
   2615 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2616 		mutex_enter(rxq->rxq_lock);
   2617 		wm_rxdrain(rxq);
   2618 		mutex_exit(rxq->rxq_lock);
   2619 	}
   2620 	/* Must unlock here */
   2621 
   2622 	/* Disestablish the interrupt handler */
   2623 	for (i = 0; i < sc->sc_nintrs; i++) {
   2624 		if (sc->sc_ihs[i] != NULL) {
   2625 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2626 			sc->sc_ihs[i] = NULL;
   2627 		}
   2628 	}
   2629 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2630 
   2631 	wm_free_txrx_queues(sc);
   2632 
   2633 	/* Unmap the registers */
   2634 	if (sc->sc_ss) {
   2635 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2636 		sc->sc_ss = 0;
   2637 	}
   2638 	if (sc->sc_ios) {
   2639 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2640 		sc->sc_ios = 0;
   2641 	}
   2642 	if (sc->sc_flashs) {
   2643 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2644 		sc->sc_flashs = 0;
   2645 	}
   2646 
   2647 	if (sc->sc_core_lock)
   2648 		mutex_obj_free(sc->sc_core_lock);
   2649 
   2650 	return 0;
   2651 }
   2652 
   2653 static bool
   2654 wm_suspend(device_t self, const pmf_qual_t *qual)
   2655 {
   2656 	struct wm_softc *sc = device_private(self);
   2657 
   2658 	wm_release_manageability(sc);
   2659 	wm_release_hw_control(sc);
   2660 #ifdef WM_WOL
   2661 	wm_enable_wakeup(sc);
   2662 #endif
   2663 
   2664 	return true;
   2665 }
   2666 
   2667 static bool
   2668 wm_resume(device_t self, const pmf_qual_t *qual)
   2669 {
   2670 	struct wm_softc *sc = device_private(self);
   2671 
   2672 	wm_init_manageability(sc);
   2673 
   2674 	return true;
   2675 }
   2676 
   2677 /*
   2678  * wm_watchdog:		[ifnet interface function]
   2679  *
   2680  *	Watchdog timer handler.
   2681  */
   2682 static void
   2683 wm_watchdog(struct ifnet *ifp)
   2684 {
   2685 	int qid;
   2686 	struct wm_softc *sc = ifp->if_softc;
   2687 
   2688 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2689 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2690 
   2691 		wm_watchdog_txq(ifp, txq);
   2692 	}
   2693 
   2694 	/* Reset the interface. */
   2695 	(void) wm_init(ifp);
   2696 
   2697 	/*
    2698 	 * There is still some upper-layer processing that calls
    2699 	 * ifp->if_start(), e.g. ALTQ.
   2700 	 */
   2701 	/* Try to get more packets going. */
   2702 	ifp->if_start(ifp);
   2703 }
   2704 
   2705 static void
   2706 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2707 {
   2708 	struct wm_softc *sc = ifp->if_softc;
   2709 
   2710 	/*
   2711 	 * Since we're using delayed interrupts, sweep up
   2712 	 * before we report an error.
   2713 	 */
   2714 	mutex_enter(txq->txq_lock);
   2715 	wm_txeof(sc, txq);
   2716 	mutex_exit(txq->txq_lock);
   2717 
   2718 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2719 #ifdef WM_DEBUG
   2720 		int i, j;
   2721 		struct wm_txsoft *txs;
   2722 #endif
   2723 		log(LOG_ERR,
   2724 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2725 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2726 		    txq->txq_next);
   2727 		ifp->if_oerrors++;
   2728 #ifdef WM_DEBUG
    2729 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2730 		    i = WM_NEXTTXS(txq, i)) {
    2731 			txs = &txq->txq_soft[i];
    2732 			printf("txs %d tx %d -> %d\n",
    2733 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2734 			for (j = txs->txs_firstdesc; ;
    2735 			    j = WM_NEXTTX(txq, j)) {
    2736 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2737 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2738 				printf("\t %#08x%08x\n",
    2739 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2740 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2741 				if (j == txs->txs_lastdesc)
    2742 					break;
    2743 			}
    2744 		}
   2745 #endif
   2746 	}
   2747 }
   2748 
   2749 /*
   2750  * wm_tick:
   2751  *
   2752  *	One second timer, used to check link status, sweep up
   2753  *	completed transmit jobs, etc.
   2754  */
   2755 static void
   2756 wm_tick(void *arg)
   2757 {
   2758 	struct wm_softc *sc = arg;
   2759 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2760 #ifndef WM_MPSAFE
   2761 	int s = splnet();
   2762 #endif
   2763 
   2764 	WM_CORE_LOCK(sc);
   2765 
   2766 	if (sc->sc_stopping)
   2767 		goto out;
   2768 
   2769 	if (sc->sc_type >= WM_T_82542_2_1) {
   2770 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2771 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2772 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2773 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2774 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2775 	}
   2776 
   2777 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2778 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2779 	    + CSR_READ(sc, WMREG_CRCERRS)
   2780 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2781 	    + CSR_READ(sc, WMREG_SYMERRC)
   2782 	    + CSR_READ(sc, WMREG_RXERRC)
   2783 	    + CSR_READ(sc, WMREG_SEC)
   2784 	    + CSR_READ(sc, WMREG_CEXTERR)
   2785 	    + CSR_READ(sc, WMREG_RLEC);
   2786 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2787 
   2788 	if (sc->sc_flags & WM_F_HAS_MII)
   2789 		mii_tick(&sc->sc_mii);
   2790 	else if ((sc->sc_type >= WM_T_82575)
   2791 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2792 		wm_serdes_tick(sc);
   2793 	else
   2794 		wm_tbi_tick(sc);
   2795 
   2796 out:
   2797 	WM_CORE_UNLOCK(sc);
   2798 #ifndef WM_MPSAFE
   2799 	splx(s);
   2800 #endif
   2801 
   2802 	if (!sc->sc_stopping)
   2803 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2804 }
   2805 
   2806 static int
   2807 wm_ifflags_cb(struct ethercom *ec)
   2808 {
   2809 	struct ifnet *ifp = &ec->ec_if;
   2810 	struct wm_softc *sc = ifp->if_softc;
   2811 	int rc = 0;
   2812 
   2813 	WM_CORE_LOCK(sc);
   2814 
   2815 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2816 	sc->sc_if_flags = ifp->if_flags;
   2817 
   2818 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2819 		rc = ENETRESET;
   2820 		goto out;
   2821 	}
   2822 
   2823 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2824 		wm_set_filter(sc);
   2825 
   2826 	wm_set_vlan(sc);
   2827 
   2828 out:
   2829 	WM_CORE_UNLOCK(sc);
   2830 
   2831 	return rc;
   2832 }
   2833 
   2834 /*
   2835  * wm_ioctl:		[ifnet interface function]
   2836  *
   2837  *	Handle control requests from the operator.
   2838  */
   2839 static int
   2840 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2841 {
   2842 	struct wm_softc *sc = ifp->if_softc;
   2843 	struct ifreq *ifr = (struct ifreq *) data;
   2844 	struct ifaddr *ifa = (struct ifaddr *)data;
   2845 	struct sockaddr_dl *sdl;
   2846 	int s, error;
   2847 
   2848 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2849 		device_xname(sc->sc_dev), __func__));
   2850 
   2851 #ifndef WM_MPSAFE
   2852 	s = splnet();
   2853 #endif
   2854 	switch (cmd) {
   2855 	case SIOCSIFMEDIA:
   2856 	case SIOCGIFMEDIA:
   2857 		WM_CORE_LOCK(sc);
   2858 		/* Flow control requires full-duplex mode. */
   2859 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2860 		    (ifr->ifr_media & IFM_FDX) == 0)
   2861 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2862 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2863 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2864 				/* We can do both TXPAUSE and RXPAUSE. */
   2865 				ifr->ifr_media |=
   2866 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2867 			}
   2868 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2869 		}
   2870 		WM_CORE_UNLOCK(sc);
   2871 #ifdef WM_MPSAFE
   2872 		s = splnet();
   2873 #endif
   2874 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2875 #ifdef WM_MPSAFE
   2876 		splx(s);
   2877 #endif
   2878 		break;
   2879 	case SIOCINITIFADDR:
   2880 		WM_CORE_LOCK(sc);
   2881 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2882 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2883 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2884 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2885 			/* unicast address is first multicast entry */
   2886 			wm_set_filter(sc);
   2887 			error = 0;
   2888 			WM_CORE_UNLOCK(sc);
   2889 			break;
   2890 		}
   2891 		WM_CORE_UNLOCK(sc);
   2892 		/*FALLTHROUGH*/
   2893 	default:
   2894 #ifdef WM_MPSAFE
   2895 		s = splnet();
   2896 #endif
   2897 		/* It may call wm_start, so unlock here */
   2898 		error = ether_ioctl(ifp, cmd, data);
   2899 #ifdef WM_MPSAFE
   2900 		splx(s);
   2901 #endif
   2902 		if (error != ENETRESET)
   2903 			break;
   2904 
   2905 		error = 0;
   2906 
   2907 		if (cmd == SIOCSIFCAP) {
   2908 			error = (*ifp->if_init)(ifp);
   2909 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2910 			;
   2911 		else if (ifp->if_flags & IFF_RUNNING) {
   2912 			/*
   2913 			 * Multicast list has changed; set the hardware filter
   2914 			 * accordingly.
   2915 			 */
   2916 			WM_CORE_LOCK(sc);
   2917 			wm_set_filter(sc);
   2918 			WM_CORE_UNLOCK(sc);
   2919 		}
   2920 		break;
   2921 	}
   2922 
   2923 #ifndef WM_MPSAFE
   2924 	splx(s);
   2925 #endif
   2926 	return error;
   2927 }
   2928 
   2929 /* MAC address related */
   2930 
   2931 /*
    2932  * Get the offset of the MAC address and return it.
    2933  * If an error occurs, use offset 0.
   2934  */
   2935 static uint16_t
   2936 wm_check_alt_mac_addr(struct wm_softc *sc)
   2937 {
   2938 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2939 	uint16_t offset = NVM_OFF_MACADDR;
   2940 
   2941 	/* Try to read alternative MAC address pointer */
   2942 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2943 		return 0;
   2944 
    2945 	/* Check whether the pointer is valid. */
   2946 	if ((offset == 0x0000) || (offset == 0xffff))
   2947 		return 0;
   2948 
   2949 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2950 	/*
    2951 	 * Check whether the alternative MAC address is valid.
    2952 	 * Some cards have a non-0xffff pointer but don't actually use
    2953 	 * an alternative MAC address.
    2954 	 *
    2955 	 * Check whether the multicast (group) bit is set or not.
   2956 	 */
   2957 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2958 		if (((myea[0] & 0xff) & 0x01) == 0)
   2959 			return offset; /* Found */
   2960 
   2961 	/* Not found */
   2962 	return 0;
   2963 }
   2964 
   2965 static int
   2966 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2967 {
   2968 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2969 	uint16_t offset = NVM_OFF_MACADDR;
   2970 	int do_invert = 0;
   2971 
   2972 	switch (sc->sc_type) {
   2973 	case WM_T_82580:
   2974 	case WM_T_I350:
   2975 	case WM_T_I354:
   2976 		/* EEPROM Top Level Partitioning */
   2977 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2978 		break;
   2979 	case WM_T_82571:
   2980 	case WM_T_82575:
   2981 	case WM_T_82576:
   2982 	case WM_T_80003:
   2983 	case WM_T_I210:
   2984 	case WM_T_I211:
   2985 		offset = wm_check_alt_mac_addr(sc);
   2986 		if (offset == 0)
   2987 			if ((sc->sc_funcid & 0x01) == 1)
   2988 				do_invert = 1;
   2989 		break;
   2990 	default:
   2991 		if ((sc->sc_funcid & 0x01) == 1)
   2992 			do_invert = 1;
   2993 		break;
   2994 	}
   2995 
   2996 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2997 		myea) != 0)
   2998 		goto bad;
   2999 
   3000 	enaddr[0] = myea[0] & 0xff;
   3001 	enaddr[1] = myea[0] >> 8;
   3002 	enaddr[2] = myea[1] & 0xff;
   3003 	enaddr[3] = myea[1] >> 8;
   3004 	enaddr[4] = myea[2] & 0xff;
   3005 	enaddr[5] = myea[2] >> 8;
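	/*
	 * Example: the NVM words are little-endian, so myea[] =
	 * { 0x1100, 0x3322, 0x5544 } unpacks to 00:11:22:33:44:55.
	 */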
   3006 
   3007 	/*
   3008 	 * Toggle the LSB of the MAC address on the second port
   3009 	 * of some dual port cards.
   3010 	 */
   3011 	if (do_invert != 0)
   3012 		enaddr[5] ^= 1;
   3013 
   3014 	return 0;
   3015 
   3016  bad:
   3017 	return -1;
   3018 }
   3019 
   3020 /*
   3021  * wm_set_ral:
   3022  *
    3023  *	Set an entry in the receive address list.
   3024  */
   3025 static void
   3026 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3027 {
   3028 	uint32_t ral_lo, ral_hi;
   3029 
   3030 	if (enaddr != NULL) {
   3031 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3032 		    (enaddr[3] << 24);
   3033 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3034 		ral_hi |= RAL_AV;
   3035 	} else {
   3036 		ral_lo = 0;
   3037 		ral_hi = 0;
   3038 	}
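	/*
	 * Example: 00:11:22:33:44:55 packs to ral_lo = 0x33221100 and
	 * ral_hi = 0x5544 | RAL_AV.
	 */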
   3039 
   3040 	if (sc->sc_type >= WM_T_82544) {
   3041 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3042 		    ral_lo);
   3043 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3044 		    ral_hi);
   3045 	} else {
   3046 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3047 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3048 	}
   3049 }
   3050 
   3051 /*
   3052  * wm_mchash:
   3053  *
    3054  *	Compute the hash of the multicast address for the 4096-bit
    3055  *	multicast filter (1024-bit on ICH/PCH variants).
   3056  */
   3057 static uint32_t
   3058 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3059 {
   3060 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3061 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3062 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3063 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3064 	uint32_t hash;
   3065 
   3066 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3067 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3068 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3069 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3070 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3071 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3072 		return (hash & 0x3ff);
   3073 	}
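	/*
	 * Example (sc_mchash_type == 0): enaddr[4] = 0xab, enaddr[5] = 0xcd
	 * yields hash = (0xab >> 4) | (0xcd << 4) = 0xcda.
	 */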
   3074 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3075 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3076 
   3077 	return (hash & 0xfff);
   3078 }
   3079 
   3080 /*
   3081  * wm_set_filter:
   3082  *
   3083  *	Set up the receive filter.
   3084  */
   3085 static void
   3086 wm_set_filter(struct wm_softc *sc)
   3087 {
   3088 	struct ethercom *ec = &sc->sc_ethercom;
   3089 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3090 	struct ether_multi *enm;
   3091 	struct ether_multistep step;
   3092 	bus_addr_t mta_reg;
   3093 	uint32_t hash, reg, bit;
   3094 	int i, size, ralmax;
   3095 
   3096 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3097 		device_xname(sc->sc_dev), __func__));
   3098 
   3099 	if (sc->sc_type >= WM_T_82544)
   3100 		mta_reg = WMREG_CORDOVA_MTA;
   3101 	else
   3102 		mta_reg = WMREG_MTA;
   3103 
   3104 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3105 
   3106 	if (ifp->if_flags & IFF_BROADCAST)
   3107 		sc->sc_rctl |= RCTL_BAM;
   3108 	if (ifp->if_flags & IFF_PROMISC) {
   3109 		sc->sc_rctl |= RCTL_UPE;
   3110 		goto allmulti;
   3111 	}
   3112 
   3113 	/*
   3114 	 * Set the station address in the first RAL slot, and
   3115 	 * clear the remaining slots.
   3116 	 */
   3117 	if (sc->sc_type == WM_T_ICH8)
    3118 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3119 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3120 	    || (sc->sc_type == WM_T_PCH))
   3121 		size = WM_RAL_TABSIZE_ICH8;
   3122 	else if (sc->sc_type == WM_T_PCH2)
   3123 		size = WM_RAL_TABSIZE_PCH2;
   3124 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3125 		size = WM_RAL_TABSIZE_PCH_LPT;
   3126 	else if (sc->sc_type == WM_T_82575)
   3127 		size = WM_RAL_TABSIZE_82575;
   3128 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3129 		size = WM_RAL_TABSIZE_82576;
   3130 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3131 		size = WM_RAL_TABSIZE_I350;
   3132 	else
   3133 		size = WM_RAL_TABSIZE;
   3134 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3135 
   3136 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3137 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3138 		switch (i) {
   3139 		case 0:
   3140 			/* We can use all entries */
   3141 			ralmax = size;
   3142 			break;
   3143 		case 1:
   3144 			/* Only RAR[0] */
   3145 			ralmax = 1;
   3146 			break;
   3147 		default:
    3148 			/* Available SHRA entries + RAR[0] */
   3149 			ralmax = i + 1;
   3150 		}
   3151 	} else
   3152 		ralmax = size;
   3153 	for (i = 1; i < size; i++) {
   3154 		if (i < ralmax)
   3155 			wm_set_ral(sc, NULL, i);
   3156 	}
   3157 
   3158 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3159 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3160 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3161 	    || (sc->sc_type == WM_T_PCH_SPT))
   3162 		size = WM_ICH8_MC_TABSIZE;
   3163 	else
   3164 		size = WM_MC_TABSIZE;
   3165 	/* Clear out the multicast table. */
   3166 	for (i = 0; i < size; i++)
   3167 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3168 
   3169 	ETHER_FIRST_MULTI(step, ec, enm);
   3170 	while (enm != NULL) {
   3171 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3172 			/*
   3173 			 * We must listen to a range of multicast addresses.
   3174 			 * For now, just accept all multicasts, rather than
   3175 			 * trying to set only those filter bits needed to match
   3176 			 * the range.  (At this time, the only use of address
   3177 			 * ranges is for IP multicast routing, for which the
   3178 			 * range is big enough to require all bits set.)
   3179 			 */
   3180 			goto allmulti;
   3181 		}
   3182 
   3183 		hash = wm_mchash(sc, enm->enm_addrlo);
   3184 
   3185 		reg = (hash >> 5);
   3186 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3187 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3188 		    || (sc->sc_type == WM_T_PCH2)
   3189 		    || (sc->sc_type == WM_T_PCH_LPT)
   3190 		    || (sc->sc_type == WM_T_PCH_SPT))
   3191 			reg &= 0x1f;
   3192 		else
   3193 			reg &= 0x7f;
   3194 		bit = hash & 0x1f;
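		/*
		 * e.g. hash 0xcda selects MTA word 0x66 (0xcda >> 5) and
		 * bit 26 (0xcda & 0x1f) within that word.
		 */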
   3195 
   3196 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3197 		hash |= 1U << bit;
   3198 
   3199 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3200 			/*
    3201 			 * 82544 Errata 9: Certain registers cannot be written
   3202 			 * with particular alignments in PCI-X bus operation
   3203 			 * (FCAH, MTA and VFTA).
   3204 			 */
   3205 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3206 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3207 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3208 		} else
   3209 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3210 
   3211 		ETHER_NEXT_MULTI(step, enm);
   3212 	}
   3213 
   3214 	ifp->if_flags &= ~IFF_ALLMULTI;
   3215 	goto setit;
   3216 
   3217  allmulti:
   3218 	ifp->if_flags |= IFF_ALLMULTI;
   3219 	sc->sc_rctl |= RCTL_MPE;
   3220 
   3221  setit:
   3222 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3223 }
   3224 
   3225 /* Reset and init related */
   3226 
   3227 static void
   3228 wm_set_vlan(struct wm_softc *sc)
   3229 {
   3230 
   3231 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3232 		device_xname(sc->sc_dev), __func__));
   3233 
   3234 	/* Deal with VLAN enables. */
   3235 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3236 		sc->sc_ctrl |= CTRL_VME;
   3237 	else
   3238 		sc->sc_ctrl &= ~CTRL_VME;
   3239 
   3240 	/* Write the control registers. */
   3241 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3242 }
   3243 
   3244 static void
   3245 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3246 {
   3247 	uint32_t gcr;
   3248 	pcireg_t ctrl2;
   3249 
   3250 	gcr = CSR_READ(sc, WMREG_GCR);
   3251 
   3252 	/* Only take action if timeout value is defaulted to 0 */
   3253 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3254 		goto out;
   3255 
   3256 	if ((gcr & GCR_CAP_VER2) == 0) {
   3257 		gcr |= GCR_CMPL_TMOUT_10MS;
   3258 		goto out;
   3259 	}
   3260 
   3261 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3262 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3263 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3264 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3265 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3266 
   3267 out:
   3268 	/* Disable completion timeout resend */
   3269 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3270 
   3271 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3272 }
   3273 
   3274 void
   3275 wm_get_auto_rd_done(struct wm_softc *sc)
   3276 {
   3277 	int i;
   3278 
   3279 	/* wait for eeprom to reload */
   3280 	switch (sc->sc_type) {
   3281 	case WM_T_82571:
   3282 	case WM_T_82572:
   3283 	case WM_T_82573:
   3284 	case WM_T_82574:
   3285 	case WM_T_82583:
   3286 	case WM_T_82575:
   3287 	case WM_T_82576:
   3288 	case WM_T_82580:
   3289 	case WM_T_I350:
   3290 	case WM_T_I354:
   3291 	case WM_T_I210:
   3292 	case WM_T_I211:
   3293 	case WM_T_80003:
   3294 	case WM_T_ICH8:
   3295 	case WM_T_ICH9:
   3296 		for (i = 0; i < 10; i++) {
   3297 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3298 				break;
   3299 			delay(1000);
   3300 		}
   3301 		if (i == 10) {
   3302 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3303 			    "complete\n", device_xname(sc->sc_dev));
   3304 		}
   3305 		break;
   3306 	default:
   3307 		break;
   3308 	}
   3309 }
   3310 
   3311 void
   3312 wm_lan_init_done(struct wm_softc *sc)
   3313 {
   3314 	uint32_t reg = 0;
   3315 	int i;
   3316 
   3317 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3318 		device_xname(sc->sc_dev), __func__));
   3319 
   3320 	/* Wait for eeprom to reload */
   3321 	switch (sc->sc_type) {
   3322 	case WM_T_ICH10:
   3323 	case WM_T_PCH:
   3324 	case WM_T_PCH2:
   3325 	case WM_T_PCH_LPT:
   3326 	case WM_T_PCH_SPT:
   3327 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3328 			reg = CSR_READ(sc, WMREG_STATUS);
   3329 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3330 				break;
   3331 			delay(100);
   3332 		}
   3333 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3334 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3335 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3336 		}
   3337 		break;
   3338 	default:
   3339 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3340 		    __func__);
   3341 		break;
   3342 	}
   3343 
   3344 	reg &= ~STATUS_LAN_INIT_DONE;
   3345 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3346 }
   3347 
   3348 void
   3349 wm_get_cfg_done(struct wm_softc *sc)
   3350 {
   3351 	int mask;
   3352 	uint32_t reg;
   3353 	int i;
   3354 
   3355 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3356 		device_xname(sc->sc_dev), __func__));
   3357 
   3358 	/* Wait for eeprom to reload */
   3359 	switch (sc->sc_type) {
   3360 	case WM_T_82542_2_0:
   3361 	case WM_T_82542_2_1:
   3362 		/* null */
   3363 		break;
   3364 	case WM_T_82543:
   3365 	case WM_T_82544:
   3366 	case WM_T_82540:
   3367 	case WM_T_82545:
   3368 	case WM_T_82545_3:
   3369 	case WM_T_82546:
   3370 	case WM_T_82546_3:
   3371 	case WM_T_82541:
   3372 	case WM_T_82541_2:
   3373 	case WM_T_82547:
   3374 	case WM_T_82547_2:
   3375 	case WM_T_82573:
   3376 	case WM_T_82574:
   3377 	case WM_T_82583:
   3378 		/* generic */
   3379 		delay(10*1000);
   3380 		break;
   3381 	case WM_T_80003:
   3382 	case WM_T_82571:
   3383 	case WM_T_82572:
   3384 	case WM_T_82575:
   3385 	case WM_T_82576:
   3386 	case WM_T_82580:
   3387 	case WM_T_I350:
   3388 	case WM_T_I354:
   3389 	case WM_T_I210:
   3390 	case WM_T_I211:
   3391 		if (sc->sc_type == WM_T_82571) {
   3392 			/* Only 82571 shares port 0 */
   3393 			mask = EEMNGCTL_CFGDONE_0;
   3394 		} else
   3395 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3396 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3397 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3398 				break;
   3399 			delay(1000);
   3400 		}
   3401 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3402 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3403 				device_xname(sc->sc_dev), __func__));
   3404 		}
   3405 		break;
   3406 	case WM_T_ICH8:
   3407 	case WM_T_ICH9:
   3408 	case WM_T_ICH10:
   3409 	case WM_T_PCH:
   3410 	case WM_T_PCH2:
   3411 	case WM_T_PCH_LPT:
   3412 	case WM_T_PCH_SPT:
   3413 		delay(10*1000);
   3414 		if (sc->sc_type >= WM_T_ICH10)
   3415 			wm_lan_init_done(sc);
   3416 		else
   3417 			wm_get_auto_rd_done(sc);
   3418 
   3419 		reg = CSR_READ(sc, WMREG_STATUS);
   3420 		if ((reg & STATUS_PHYRA) != 0)
   3421 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3422 		break;
   3423 	default:
   3424 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3425 		    __func__);
   3426 		break;
   3427 	}
   3428 }
   3429 
   3430 /* Init hardware bits */
   3431 void
   3432 wm_initialize_hardware_bits(struct wm_softc *sc)
   3433 {
   3434 	uint32_t tarc0, tarc1, reg;
   3435 
   3436 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3437 		device_xname(sc->sc_dev), __func__));
   3438 
   3439 	/* For 82571 variant, 80003 and ICHs */
   3440 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3441 	    || (sc->sc_type >= WM_T_80003)) {
   3442 
   3443 		/* Transmit Descriptor Control 0 */
   3444 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3445 		reg |= TXDCTL_COUNT_DESC;
   3446 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3447 
   3448 		/* Transmit Descriptor Control 1 */
   3449 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3450 		reg |= TXDCTL_COUNT_DESC;
   3451 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3452 
   3453 		/* TARC0 */
   3454 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3455 		switch (sc->sc_type) {
   3456 		case WM_T_82571:
   3457 		case WM_T_82572:
   3458 		case WM_T_82573:
   3459 		case WM_T_82574:
   3460 		case WM_T_82583:
   3461 		case WM_T_80003:
   3462 			/* Clear bits 30..27 */
   3463 			tarc0 &= ~__BITS(30, 27);
   3464 			break;
   3465 		default:
   3466 			break;
   3467 		}
   3468 
   3469 		switch (sc->sc_type) {
   3470 		case WM_T_82571:
   3471 		case WM_T_82572:
   3472 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3473 
   3474 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3475 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3476 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3477 			/* 8257[12] Errata No.7 */
    3478 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3479 
   3480 			/* TARC1 bit 28 */
   3481 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3482 				tarc1 &= ~__BIT(28);
   3483 			else
   3484 				tarc1 |= __BIT(28);
   3485 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3486 
   3487 			/*
   3488 			 * 8257[12] Errata No.13
    3489 			 * Disable Dynamic Clock Gating.
   3490 			 */
   3491 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3492 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3493 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3494 			break;
   3495 		case WM_T_82573:
   3496 		case WM_T_82574:
   3497 		case WM_T_82583:
   3498 			if ((sc->sc_type == WM_T_82574)
   3499 			    || (sc->sc_type == WM_T_82583))
   3500 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3501 
   3502 			/* Extended Device Control */
   3503 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3504 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3505 			reg |= __BIT(22);	/* Set bit 22 */
   3506 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3507 
   3508 			/* Device Control */
   3509 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3510 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3511 
   3512 			/* PCIe Control Register */
   3513 			/*
   3514 			 * 82573 Errata (unknown).
   3515 			 *
   3516 			 * 82574 Errata 25 and 82583 Errata 12
   3517 			 * "Dropped Rx Packets":
    3518 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3519 			 */
   3520 			reg = CSR_READ(sc, WMREG_GCR);
   3521 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3522 			CSR_WRITE(sc, WMREG_GCR, reg);
   3523 
   3524 			if ((sc->sc_type == WM_T_82574)
   3525 			    || (sc->sc_type == WM_T_82583)) {
   3526 				/*
   3527 				 * Document says this bit must be set for
   3528 				 * proper operation.
   3529 				 */
   3530 				reg = CSR_READ(sc, WMREG_GCR);
   3531 				reg |= __BIT(22);
   3532 				CSR_WRITE(sc, WMREG_GCR, reg);
   3533 
   3534 				/*
    3535 				 * Apply a workaround for the hardware errata
    3536 				 * documented in the errata docs. It fixes an
    3537 				 * issue where some error-prone or unreliable
    3538 				 * PCIe completions occur, particularly with
    3539 				 * ASPM enabled. Without the fix, the issue
    3540 				 * can cause Tx timeouts.
   3541 				 */
   3542 				reg = CSR_READ(sc, WMREG_GCR2);
   3543 				reg |= __BIT(0);
   3544 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3545 			}
   3546 			break;
   3547 		case WM_T_80003:
   3548 			/* TARC0 */
   3549 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3550 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3551 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3552 
   3553 			/* TARC1 bit 28 */
   3554 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3555 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3556 				tarc1 &= ~__BIT(28);
   3557 			else
   3558 				tarc1 |= __BIT(28);
   3559 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3560 			break;
   3561 		case WM_T_ICH8:
   3562 		case WM_T_ICH9:
   3563 		case WM_T_ICH10:
   3564 		case WM_T_PCH:
   3565 		case WM_T_PCH2:
   3566 		case WM_T_PCH_LPT:
   3567 		case WM_T_PCH_SPT:
   3568 			/* TARC0 */
   3569 			if ((sc->sc_type == WM_T_ICH8)
   3570 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3571 				/* Set TARC0 bits 29 and 28 */
   3572 				tarc0 |= __BITS(29, 28);
   3573 			}
   3574 			/* Set TARC0 bits 23,24,26,27 */
   3575 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3576 
   3577 			/* CTRL_EXT */
   3578 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3579 			reg |= __BIT(22);	/* Set bit 22 */
   3580 			/*
   3581 			 * Enable PHY low-power state when MAC is at D3
   3582 			 * w/o WoL
   3583 			 */
   3584 			if (sc->sc_type >= WM_T_PCH)
   3585 				reg |= CTRL_EXT_PHYPDEN;
   3586 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3587 
   3588 			/* TARC1 */
   3589 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3590 			/* bit 28 */
   3591 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3592 				tarc1 &= ~__BIT(28);
   3593 			else
   3594 				tarc1 |= __BIT(28);
   3595 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3596 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3597 
   3598 			/* Device Status */
   3599 			if (sc->sc_type == WM_T_ICH8) {
   3600 				reg = CSR_READ(sc, WMREG_STATUS);
   3601 				reg &= ~__BIT(31);
   3602 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3603 
   3604 			}
   3605 
   3606 			/* IOSFPC */
   3607 			if (sc->sc_type == WM_T_PCH_SPT) {
   3608 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3609 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3610 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3611 			}
   3612 			/*
    3613 			 * To work around a descriptor data corruption issue
    3614 			 * seen with NFS v2 UDP traffic, just disable the NFS
    3615 			 * filtering capability.
   3616 			 */
   3617 			reg = CSR_READ(sc, WMREG_RFCTL);
   3618 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3619 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3620 			break;
   3621 		default:
   3622 			break;
   3623 		}
   3624 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3625 
   3626 		/*
   3627 		 * 8257[12] Errata No.52 and some others.
   3628 		 * Avoid RSS Hash Value bug.
   3629 		 */
   3630 		switch (sc->sc_type) {
   3631 		case WM_T_82571:
   3632 		case WM_T_82572:
   3633 		case WM_T_82573:
   3634 		case WM_T_80003:
   3635 		case WM_T_ICH8:
   3636 			reg = CSR_READ(sc, WMREG_RFCTL);
   3637 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3638 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3639 			break;
   3640 		default:
   3641 			break;
   3642 		}
   3643 	}
   3644 }
   3645 
   3646 static uint32_t
   3647 wm_rxpbs_adjust_82580(uint32_t val)
   3648 {
   3649 	uint32_t rv = 0;
   3650 
   3651 	if (val < __arraycount(wm_82580_rxpbs_table))
   3652 		rv = wm_82580_rxpbs_table[val];
   3653 
   3654 	return rv;
   3655 }
   3656 
   3657 /*
   3658  * wm_reset:
   3659  *
   3660  *	Reset the i82542 chip.
   3661  */
   3662 static void
   3663 wm_reset(struct wm_softc *sc)
   3664 {
   3665 	int phy_reset = 0;
   3666 	int i, error = 0;
   3667 	uint32_t reg, mask;
   3668 
   3669 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3670 		device_xname(sc->sc_dev), __func__));
   3671 	KASSERT(sc->sc_type != 0);
   3672 
   3673 	/*
   3674 	 * Allocate on-chip memory according to the MTU size.
   3675 	 * The Packet Buffer Allocation register must be written
   3676 	 * before the chip is reset.
   3677 	 */
   3678 	switch (sc->sc_type) {
   3679 	case WM_T_82547:
   3680 	case WM_T_82547_2:
   3681 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3682 		    PBA_22K : PBA_30K;
   3683 		for (i = 0; i < sc->sc_nqueues; i++) {
   3684 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3685 			txq->txq_fifo_head = 0;
   3686 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3687 			txq->txq_fifo_size =
   3688 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3689 			txq->txq_fifo_stall = 0;
   3690 		}
   3691 		break;
   3692 	case WM_T_82571:
   3693 	case WM_T_82572:
    3694 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3695 	case WM_T_80003:
   3696 		sc->sc_pba = PBA_32K;
   3697 		break;
   3698 	case WM_T_82573:
   3699 		sc->sc_pba = PBA_12K;
   3700 		break;
   3701 	case WM_T_82574:
   3702 	case WM_T_82583:
   3703 		sc->sc_pba = PBA_20K;
   3704 		break;
   3705 	case WM_T_82576:
   3706 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3707 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3708 		break;
   3709 	case WM_T_82580:
   3710 	case WM_T_I350:
   3711 	case WM_T_I354:
   3712 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3713 		break;
   3714 	case WM_T_I210:
   3715 	case WM_T_I211:
   3716 		sc->sc_pba = PBA_34K;
   3717 		break;
   3718 	case WM_T_ICH8:
   3719 		/* Workaround for a bit corruption issue in FIFO memory */
   3720 		sc->sc_pba = PBA_8K;
   3721 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3722 		break;
   3723 	case WM_T_ICH9:
   3724 	case WM_T_ICH10:
   3725 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3726 		    PBA_14K : PBA_10K;
   3727 		break;
   3728 	case WM_T_PCH:
   3729 	case WM_T_PCH2:
   3730 	case WM_T_PCH_LPT:
   3731 	case WM_T_PCH_SPT:
   3732 		sc->sc_pba = PBA_26K;
   3733 		break;
   3734 	default:
   3735 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3736 		    PBA_40K : PBA_48K;
   3737 		break;
   3738 	}
   3739 	/*
    3740 	 * Only old or non-multiqueue devices have the PBA register.
   3741 	 * XXX Need special handling for 82575.
   3742 	 */
   3743 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3744 	    || (sc->sc_type == WM_T_82575))
   3745 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3746 
   3747 	/* Prevent the PCI-E bus from sticking */
   3748 	if (sc->sc_flags & WM_F_PCIE) {
   3749 		int timeout = 800;
   3750 
   3751 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3752 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3753 
   3754 		while (timeout--) {
   3755 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3756 			    == 0)
   3757 				break;
   3758 			delay(100);
   3759 		}
   3760 	}
   3761 
   3762 	/* Set the completion timeout for interface */
   3763 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3764 	    || (sc->sc_type == WM_T_82580)
   3765 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3766 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3767 		wm_set_pcie_completion_timeout(sc);
   3768 
   3769 	/* Clear interrupt */
   3770 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3771 	if (sc->sc_nintrs > 1) {
   3772 		if (sc->sc_type != WM_T_82574) {
   3773 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3774 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3775 		} else {
   3776 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3777 		}
   3778 	}
   3779 
   3780 	/* Stop the transmit and receive processes. */
   3781 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3782 	sc->sc_rctl &= ~RCTL_EN;
   3783 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3784 	CSR_WRITE_FLUSH(sc);
   3785 
   3786 	/* XXX set_tbi_sbp_82543() */
   3787 
   3788 	delay(10*1000);
   3789 
   3790 	/* Must acquire the MDIO ownership before MAC reset */
   3791 	switch (sc->sc_type) {
   3792 	case WM_T_82573:
   3793 	case WM_T_82574:
   3794 	case WM_T_82583:
   3795 		error = wm_get_hw_semaphore_82573(sc);
   3796 		break;
   3797 	default:
   3798 		break;
   3799 	}
   3800 
   3801 	/*
   3802 	 * 82541 Errata 29? & 82547 Errata 28?
   3803 	 * See also the description about PHY_RST bit in CTRL register
   3804 	 * in 8254x_GBe_SDM.pdf.
   3805 	 */
   3806 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3807 		CSR_WRITE(sc, WMREG_CTRL,
   3808 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3809 		CSR_WRITE_FLUSH(sc);
   3810 		delay(5000);
   3811 	}
   3812 
   3813 	switch (sc->sc_type) {
   3814 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3815 	case WM_T_82541:
   3816 	case WM_T_82541_2:
   3817 	case WM_T_82547:
   3818 	case WM_T_82547_2:
   3819 		/*
   3820 		 * On some chipsets, a reset through a memory-mapped write
   3821 		 * cycle can cause the chip to reset before completing the
    3822 		 * write cycle.  This causes a major headache that can be
   3823 		 * avoided by issuing the reset via indirect register writes
   3824 		 * through I/O space.
   3825 		 *
   3826 		 * So, if we successfully mapped the I/O BAR at attach time,
   3827 		 * use that.  Otherwise, try our luck with a memory-mapped
   3828 		 * reset.
   3829 		 */
   3830 		if (sc->sc_flags & WM_F_IOH_VALID)
   3831 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3832 		else
   3833 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3834 		break;
   3835 	case WM_T_82545_3:
   3836 	case WM_T_82546_3:
   3837 		/* Use the shadow control register on these chips. */
   3838 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3839 		break;
   3840 	case WM_T_80003:
   3841 		mask = swfwphysem[sc->sc_funcid];
   3842 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3843 		wm_get_swfw_semaphore(sc, mask);
   3844 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3845 		wm_put_swfw_semaphore(sc, mask);
   3846 		break;
   3847 	case WM_T_ICH8:
   3848 	case WM_T_ICH9:
   3849 	case WM_T_ICH10:
   3850 	case WM_T_PCH:
   3851 	case WM_T_PCH2:
   3852 	case WM_T_PCH_LPT:
   3853 	case WM_T_PCH_SPT:
   3854 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3855 		if (wm_phy_resetisblocked(sc) == false) {
   3856 			/*
   3857 			 * Gate automatic PHY configuration by hardware on
   3858 			 * non-managed 82579
   3859 			 */
   3860 			if ((sc->sc_type == WM_T_PCH2)
   3861 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3862 				== 0))
   3863 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3864 
   3865 			reg |= CTRL_PHY_RESET;
   3866 			phy_reset = 1;
   3867 		} else
   3868 			printf("XXX reset is blocked!!!\n");
   3869 		wm_get_swfwhw_semaphore(sc);
   3870 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3871 		/* Don't insert a completion barrier when resetting */
   3872 		delay(20*1000);
   3873 		wm_put_swfwhw_semaphore(sc);
   3874 		break;
   3875 	case WM_T_82580:
   3876 	case WM_T_I350:
   3877 	case WM_T_I354:
   3878 	case WM_T_I210:
   3879 	case WM_T_I211:
   3880 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3881 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3882 			CSR_WRITE_FLUSH(sc);
   3883 		delay(5000);
   3884 		break;
   3885 	case WM_T_82542_2_0:
   3886 	case WM_T_82542_2_1:
   3887 	case WM_T_82543:
   3888 	case WM_T_82540:
   3889 	case WM_T_82545:
   3890 	case WM_T_82546:
   3891 	case WM_T_82571:
   3892 	case WM_T_82572:
   3893 	case WM_T_82573:
   3894 	case WM_T_82574:
   3895 	case WM_T_82575:
   3896 	case WM_T_82576:
   3897 	case WM_T_82583:
   3898 	default:
   3899 		/* Everything else can safely use the documented method. */
   3900 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3901 		break;
   3902 	}
   3903 
   3904 	/* Must release the MDIO ownership after MAC reset */
   3905 	switch (sc->sc_type) {
   3906 	case WM_T_82573:
   3907 	case WM_T_82574:
   3908 	case WM_T_82583:
   3909 		if (error == 0)
   3910 			wm_put_hw_semaphore_82573(sc);
   3911 		break;
   3912 	default:
   3913 		break;
   3914 	}
   3915 
   3916 	if (phy_reset != 0) {
   3917 		wm_get_cfg_done(sc);
   3918 		delay(10 * 1000);
   3919 		if (sc->sc_type >= WM_T_PCH) {
   3920 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3921 			    BM_PORT_GEN_CFG);
   3922 			reg &= ~BM_WUC_HOST_WU_BIT;
   3923 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   3924 			    BM_PORT_GEN_CFG, reg);
   3925 		}
   3926 	}
   3927 
   3928 	/* reload EEPROM */
   3929 	switch (sc->sc_type) {
   3930 	case WM_T_82542_2_0:
   3931 	case WM_T_82542_2_1:
   3932 	case WM_T_82543:
   3933 	case WM_T_82544:
   3934 		delay(10);
   3935 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3936 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3937 		CSR_WRITE_FLUSH(sc);
   3938 		delay(2000);
   3939 		break;
   3940 	case WM_T_82540:
   3941 	case WM_T_82545:
   3942 	case WM_T_82545_3:
   3943 	case WM_T_82546:
   3944 	case WM_T_82546_3:
   3945 		delay(5*1000);
   3946 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3947 		break;
   3948 	case WM_T_82541:
   3949 	case WM_T_82541_2:
   3950 	case WM_T_82547:
   3951 	case WM_T_82547_2:
   3952 		delay(20000);
   3953 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3954 		break;
   3955 	case WM_T_82571:
   3956 	case WM_T_82572:
   3957 	case WM_T_82573:
   3958 	case WM_T_82574:
   3959 	case WM_T_82583:
   3960 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3961 			delay(10);
   3962 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3963 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3964 			CSR_WRITE_FLUSH(sc);
   3965 		}
   3966 		/* check EECD_EE_AUTORD */
   3967 		wm_get_auto_rd_done(sc);
   3968 		/*
    3969 		 * PHY configuration from the NVM starts just after
    3970 		 * EECD_AUTO_RD is set.
   3971 		 */
   3972 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3973 		    || (sc->sc_type == WM_T_82583))
   3974 			delay(25*1000);
   3975 		break;
   3976 	case WM_T_82575:
   3977 	case WM_T_82576:
   3978 	case WM_T_82580:
   3979 	case WM_T_I350:
   3980 	case WM_T_I354:
   3981 	case WM_T_I210:
   3982 	case WM_T_I211:
   3983 	case WM_T_80003:
   3984 		/* check EECD_EE_AUTORD */
   3985 		wm_get_auto_rd_done(sc);
   3986 		break;
   3987 	case WM_T_ICH8:
   3988 	case WM_T_ICH9:
   3989 	case WM_T_ICH10:
   3990 	case WM_T_PCH:
   3991 	case WM_T_PCH2:
   3992 	case WM_T_PCH_LPT:
   3993 	case WM_T_PCH_SPT:
   3994 		break;
   3995 	default:
   3996 		panic("%s: unknown type\n", __func__);
   3997 	}
   3998 
   3999 	/* Check whether EEPROM is present or not */
   4000 	switch (sc->sc_type) {
   4001 	case WM_T_82575:
   4002 	case WM_T_82576:
   4003 	case WM_T_82580:
   4004 	case WM_T_I350:
   4005 	case WM_T_I354:
   4006 	case WM_T_ICH8:
   4007 	case WM_T_ICH9:
   4008 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4009 			/* Not found */
   4010 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4011 			if (sc->sc_type == WM_T_82575)
   4012 				wm_reset_init_script_82575(sc);
   4013 		}
   4014 		break;
   4015 	default:
   4016 		break;
   4017 	}
   4018 
   4019 	if ((sc->sc_type == WM_T_82580)
   4020 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4021 		/* clear global device reset status bit */
   4022 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4023 	}
   4024 
   4025 	/* Clear any pending interrupt events. */
   4026 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4027 	reg = CSR_READ(sc, WMREG_ICR);
   4028 	if (sc->sc_nintrs > 1) {
   4029 		if (sc->sc_type != WM_T_82574) {
   4030 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4031 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4032 		} else
   4033 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4034 	}
   4035 
   4036 	/* reload sc_ctrl */
   4037 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4038 
   4039 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4040 		wm_set_eee_i350(sc);
   4041 
   4042 	/* dummy read from WUC */
   4043 	if (sc->sc_type == WM_T_PCH)
   4044 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4045 	/*
   4046 	 * For PCH, this write will make sure that any noise will be detected
   4047 	 * as a CRC error and be dropped rather than show up as a bad packet
   4048 	 * to the DMA engine
   4049 	 */
   4050 	if (sc->sc_type == WM_T_PCH)
   4051 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4052 
   4053 	if (sc->sc_type >= WM_T_82544)
   4054 		CSR_WRITE(sc, WMREG_WUC, 0);
   4055 
   4056 	wm_reset_mdicnfg_82580(sc);
   4057 
   4058 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4059 		wm_pll_workaround_i210(sc);
   4060 }
   4061 
   4062 /*
   4063  * wm_add_rxbuf:
   4064  *
    4065  *	Add a receive buffer to the indicated descriptor.
   4066  */
   4067 static int
   4068 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4069 {
   4070 	struct wm_softc *sc = rxq->rxq_sc;
   4071 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4072 	struct mbuf *m;
   4073 	int error;
   4074 
   4075 	KASSERT(mutex_owned(rxq->rxq_lock));
   4076 
   4077 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4078 	if (m == NULL)
   4079 		return ENOBUFS;
   4080 
   4081 	MCLGET(m, M_DONTWAIT);
   4082 	if ((m->m_flags & M_EXT) == 0) {
   4083 		m_freem(m);
   4084 		return ENOBUFS;
   4085 	}
   4086 
   4087 	if (rxs->rxs_mbuf != NULL)
   4088 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4089 
   4090 	rxs->rxs_mbuf = m;
   4091 
   4092 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4093 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4094 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4095 	if (error) {
   4096 		/* XXX XXX XXX */
   4097 		aprint_error_dev(sc->sc_dev,
   4098 		    "unable to load rx DMA map %d, error = %d\n",
   4099 		    idx, error);
   4100 		panic("wm_add_rxbuf");
   4101 	}
   4102 
   4103 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4104 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4105 
   4106 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4107 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4108 			wm_init_rxdesc(rxq, idx);
   4109 	} else
   4110 		wm_init_rxdesc(rxq, idx);
   4111 
   4112 	return 0;
   4113 }
   4114 
   4115 /*
   4116  * wm_rxdrain:
   4117  *
   4118  *	Drain the receive queue.
   4119  */
   4120 static void
   4121 wm_rxdrain(struct wm_rxqueue *rxq)
   4122 {
   4123 	struct wm_softc *sc = rxq->rxq_sc;
   4124 	struct wm_rxsoft *rxs;
   4125 	int i;
   4126 
   4127 	KASSERT(mutex_owned(rxq->rxq_lock));
   4128 
   4129 	for (i = 0; i < WM_NRXDESC; i++) {
   4130 		rxs = &rxq->rxq_soft[i];
   4131 		if (rxs->rxs_mbuf != NULL) {
   4132 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4133 			m_freem(rxs->rxs_mbuf);
   4134 			rxs->rxs_mbuf = NULL;
   4135 		}
   4136 	}
   4137 }
   4138 
   4139 
   4140 /*
   4141  * XXX copy from FreeBSD's sys/net/rss_config.c
   4142  */
   4143 /*
   4144  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4145  * effectiveness may be limited by algorithm choice and available entropy
   4146  * during the boot.
   4147  *
   4148  * XXXRW: And that we don't randomize it yet!
   4149  *
   4150  * This is the default Microsoft RSS specification key which is also
   4151  * the Chelsio T5 firmware default key.
   4152  */
   4153 #define RSS_KEYSIZE 40
   4154 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4155 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4156 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4157 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4158 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4159 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4160 };
   4161 
   4162 /*
    4163  * The caller must pass an array of size sizeof(wm_rss_key).
    4164  *
    4165  * XXX
    4166  * As if_ixgbe may also use this function, it should not be an
    4167  * if_wm-specific function.
   4168  */
   4169 static void
   4170 wm_rss_getkey(uint8_t *key)
   4171 {
   4172 
   4173 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4174 }
   4175 
   4176 /*
    4177  * Set up the registers for RSS.
    4178  *
    4179  * XXX VMDq is not yet supported.
   4180  */
   4181 static void
   4182 wm_init_rss(struct wm_softc *sc)
   4183 {
   4184 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4185 	int i;
   4186 
   4187 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4188 
   4189 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4190 		int qid, reta_ent;
   4191 
   4192 		qid  = i % sc->sc_nqueues;
    4193 		switch (sc->sc_type) {
   4194 		case WM_T_82574:
   4195 			reta_ent = __SHIFTIN(qid,
   4196 			    RETA_ENT_QINDEX_MASK_82574);
   4197 			break;
   4198 		case WM_T_82575:
   4199 			reta_ent = __SHIFTIN(qid,
   4200 			    RETA_ENT_QINDEX1_MASK_82575);
   4201 			break;
   4202 		default:
   4203 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4204 			break;
   4205 		}
   4206 
   4207 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4208 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4209 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4210 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4211 	}
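	/*
	 * The loop above spreads the queue ids round-robin across the
	 * redirection table: with sc_nqueues == 4 the RETA entries read
	 * 0, 1, 2, 3, 0, 1, ...
	 */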
   4212 
   4213 	wm_rss_getkey((uint8_t *)rss_key);
   4214 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4215 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4216 
   4217 	if (sc->sc_type == WM_T_82574)
   4218 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4219 	else
   4220 		mrqc = MRQC_ENABLE_RSS_MQ;
   4221 
   4222 	/* XXXX
    4223 	 * The same as FreeBSD's igb.
    4224 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4225 	 */
   4226 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4227 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4228 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4229 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4230 
   4231 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4232 }
   4233 
   4234 /*
    4235  * Adjust the numbers of TX and RX queues which the system actually uses.
    4236  *
    4237  * The numbers are affected by the parameters below.
    4238  *     - The number of hardware queues
   4239  *     - The number of MSI-X vectors (= "nvectors" argument)
   4240  *     - ncpu
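 *
 * For example, with 16 hardware queues, 5 MSI-X vectors and 8 CPUs,
 * sc_nqueues becomes min(16, 5 - 1, 8) = 4.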
   4241  */
   4242 static void
   4243 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4244 {
   4245 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4246 
   4247 	if (nvectors < 2) {
   4248 		sc->sc_nqueues = 1;
   4249 		return;
   4250 	}
   4251 
    4252 	switch (sc->sc_type) {
   4253 	case WM_T_82572:
   4254 		hw_ntxqueues = 2;
   4255 		hw_nrxqueues = 2;
   4256 		break;
   4257 	case WM_T_82574:
   4258 		hw_ntxqueues = 2;
   4259 		hw_nrxqueues = 2;
   4260 		break;
   4261 	case WM_T_82575:
   4262 		hw_ntxqueues = 4;
   4263 		hw_nrxqueues = 4;
   4264 		break;
   4265 	case WM_T_82576:
   4266 		hw_ntxqueues = 16;
   4267 		hw_nrxqueues = 16;
   4268 		break;
   4269 	case WM_T_82580:
   4270 	case WM_T_I350:
   4271 	case WM_T_I354:
   4272 		hw_ntxqueues = 8;
   4273 		hw_nrxqueues = 8;
   4274 		break;
   4275 	case WM_T_I210:
   4276 		hw_ntxqueues = 4;
   4277 		hw_nrxqueues = 4;
   4278 		break;
   4279 	case WM_T_I211:
   4280 		hw_ntxqueues = 2;
   4281 		hw_nrxqueues = 2;
   4282 		break;
   4283 		/*
    4284 		 * As the Ethernet controllers below do not support MSI-X,
    4285 		 * this driver does not use multiqueue on them.
   4286 		 *     - WM_T_80003
   4287 		 *     - WM_T_ICH8
   4288 		 *     - WM_T_ICH9
   4289 		 *     - WM_T_ICH10
   4290 		 *     - WM_T_PCH
   4291 		 *     - WM_T_PCH2
   4292 		 *     - WM_T_PCH_LPT
   4293 		 */
   4294 	default:
   4295 		hw_ntxqueues = 1;
   4296 		hw_nrxqueues = 1;
   4297 		break;
   4298 	}
   4299 
   4300 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4301 
   4302 	/*
    4303 	 * As more queues than MSI-X vectors cannot improve scaling,
    4304 	 * we limit the number of queues actually used.
   4305 	 */
   4306 	if (nvectors < hw_nqueues + 1) {
   4307 		sc->sc_nqueues = nvectors - 1;
   4308 	} else {
   4309 		sc->sc_nqueues = hw_nqueues;
   4310 	}
   4311 
   4312 	/*
    4313 	 * As more queues than CPUs cannot improve scaling, we limit
    4314 	 * the number of queues actually used.
   4315 	 */
   4316 	if (ncpu < sc->sc_nqueues)
   4317 		sc->sc_nqueues = ncpu;
   4318 }
   4319 
   4320 /*
   4321  * Both single interrupt MSI and INTx can use this function.
   4322  */
   4323 static int
   4324 wm_setup_legacy(struct wm_softc *sc)
   4325 {
   4326 	pci_chipset_tag_t pc = sc->sc_pc;
   4327 	const char *intrstr = NULL;
   4328 	char intrbuf[PCI_INTRSTR_LEN];
   4329 	int error;
   4330 
   4331 	error = wm_alloc_txrx_queues(sc);
   4332 	if (error) {
   4333 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4334 		    error);
   4335 		return ENOMEM;
   4336 	}
   4337 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4338 	    sizeof(intrbuf));
   4339 #ifdef WM_MPSAFE
   4340 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4341 #endif
   4342 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4343 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4344 	if (sc->sc_ihs[0] == NULL) {
   4345 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4346 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4347 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4348 		return ENOMEM;
   4349 	}
   4350 
   4351 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4352 	sc->sc_nintrs = 1;
   4353 	return 0;
   4354 }
   4355 
   4356 static int
   4357 wm_setup_msix(struct wm_softc *sc)
   4358 {
   4359 	void *vih;
   4360 	kcpuset_t *affinity;
   4361 	int qidx, error, intr_idx, txrx_established;
   4362 	pci_chipset_tag_t pc = sc->sc_pc;
   4363 	const char *intrstr = NULL;
   4364 	char intrbuf[PCI_INTRSTR_LEN];
   4365 	char intr_xname[INTRDEVNAMEBUF];
   4366 
   4367 	if (sc->sc_nqueues < ncpu) {
   4368 		/*
    4369 		 * To avoid other devices' interrupts, the affinity of the
    4370 		 * Tx/Rx interrupts starts at CPU#1.
   4371 		 */
   4372 		sc->sc_affinity_offset = 1;
   4373 	} else {
   4374 		/*
    4375 		 * In this case, this device uses all CPUs, so we align the
    4376 		 * CPU affinity index with the MSI-X vector number for readability.
   4377 		 */
   4378 		sc->sc_affinity_offset = 0;
   4379 	}
   4380 
   4381 	error = wm_alloc_txrx_queues(sc);
   4382 	if (error) {
   4383 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4384 		    error);
   4385 		return ENOMEM;
   4386 	}
   4387 
   4388 	kcpuset_create(&affinity, false);
   4389 	intr_idx = 0;
   4390 
   4391 	/*
   4392 	 * TX and RX
   4393 	 */
   4394 	txrx_established = 0;
   4395 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4396 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4397 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4398 
   4399 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4400 		    sizeof(intrbuf));
   4401 #ifdef WM_MPSAFE
   4402 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4403 		    PCI_INTR_MPSAFE, true);
   4404 #endif
   4405 		memset(intr_xname, 0, sizeof(intr_xname));
   4406 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4407 		    device_xname(sc->sc_dev), qidx);
   4408 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4409 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4410 		if (vih == NULL) {
   4411 			aprint_error_dev(sc->sc_dev,
   4412 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4413 			    intrstr ? " at " : "",
   4414 			    intrstr ? intrstr : "");
   4415 
   4416 			goto fail;
   4417 		}
   4418 		kcpuset_zero(affinity);
   4419 		/* Round-robin affinity */
   4420 		kcpuset_set(affinity, affinity_to);
   4421 		error = interrupt_distribute(vih, affinity, NULL);
   4422 		if (error == 0) {
   4423 			aprint_normal_dev(sc->sc_dev,
   4424 			    "for TX and RX interrupting at %s affinity to %u\n",
   4425 			    intrstr, affinity_to);
   4426 		} else {
   4427 			aprint_normal_dev(sc->sc_dev,
   4428 			    "for TX and RX interrupting at %s\n", intrstr);
   4429 		}
   4430 		sc->sc_ihs[intr_idx] = vih;
    4431 		wmq->wmq_id = qidx;
   4432 		wmq->wmq_intr_idx = intr_idx;
   4433 
   4434 		txrx_established++;
   4435 		intr_idx++;
   4436 	}
   4437 
   4438 	/*
   4439 	 * LINK
   4440 	 */
   4441 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4442 	    sizeof(intrbuf));
   4443 #ifdef WM_MPSAFE
   4444 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4445 #endif
   4446 	memset(intr_xname, 0, sizeof(intr_xname));
   4447 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4448 	    device_xname(sc->sc_dev));
   4449 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4450 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4451 	if (vih == NULL) {
   4452 		aprint_error_dev(sc->sc_dev,
   4453 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4454 		    intrstr ? " at " : "",
   4455 		    intrstr ? intrstr : "");
   4456 
   4457 		goto fail;
   4458 	}
    4459 	/* Keep the default affinity for the LINK interrupt */
   4460 	aprint_normal_dev(sc->sc_dev,
   4461 	    "for LINK interrupting at %s\n", intrstr);
   4462 	sc->sc_ihs[intr_idx] = vih;
   4463 	sc->sc_link_intr_idx = intr_idx;
   4464 
   4465 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4466 	kcpuset_destroy(affinity);
   4467 	return 0;
   4468 
   4469  fail:
   4470 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4471 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4472 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4473 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4474 	}
   4475 
   4476 	kcpuset_destroy(affinity);
   4477 	return ENOMEM;
   4478 }
   4479 
   4480 /*
   4481  * wm_init:		[ifnet interface function]
   4482  *
   4483  *	Initialize the interface.
   4484  */
   4485 static int
   4486 wm_init(struct ifnet *ifp)
   4487 {
   4488 	struct wm_softc *sc = ifp->if_softc;
   4489 	int ret;
   4490 
   4491 	WM_CORE_LOCK(sc);
   4492 	ret = wm_init_locked(ifp);
   4493 	WM_CORE_UNLOCK(sc);
   4494 
   4495 	return ret;
   4496 }
   4497 
   4498 static int
   4499 wm_init_locked(struct ifnet *ifp)
   4500 {
   4501 	struct wm_softc *sc = ifp->if_softc;
   4502 	int i, j, trynum, error = 0;
   4503 	uint32_t reg;
   4504 
   4505 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4506 		device_xname(sc->sc_dev), __func__));
   4507 	KASSERT(WM_CORE_LOCKED(sc));
   4508 
   4509 	/*
    4510 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4511 	 * There is a small but measurable benefit to avoiding the adjustment
   4512 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4513 	 * on such platforms.  One possibility is that the DMA itself is
   4514 	 * slightly more efficient if the front of the entire packet (instead
   4515 	 * of the front of the headers) is aligned.
   4516 	 *
   4517 	 * Note we must always set align_tweak to 0 if we are using
   4518 	 * jumbo frames.
   4519 	 */
   4520 #ifdef __NO_STRICT_ALIGNMENT
   4521 	sc->sc_align_tweak = 0;
   4522 #else
   4523 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4524 		sc->sc_align_tweak = 0;
   4525 	else
   4526 		sc->sc_align_tweak = 2;
   4527 #endif /* __NO_STRICT_ALIGNMENT */
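	/*
	 * With sc_align_tweak = 2, the 14-byte Ethernet header ends on a
	 * 4-byte boundary, so the IP header that follows it is aligned.
	 */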
   4528 
   4529 	/* Cancel any pending I/O. */
   4530 	wm_stop_locked(ifp, 0);
   4531 
   4532 	/* update statistics before reset */
   4533 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4534 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4535 
   4536 	/* Reset the chip to a known state. */
   4537 	wm_reset(sc);
   4538 
   4539 	switch (sc->sc_type) {
   4540 	case WM_T_82571:
   4541 	case WM_T_82572:
   4542 	case WM_T_82573:
   4543 	case WM_T_82574:
   4544 	case WM_T_82583:
   4545 	case WM_T_80003:
   4546 	case WM_T_ICH8:
   4547 	case WM_T_ICH9:
   4548 	case WM_T_ICH10:
   4549 	case WM_T_PCH:
   4550 	case WM_T_PCH2:
   4551 	case WM_T_PCH_LPT:
   4552 	case WM_T_PCH_SPT:
   4553 		/* AMT based hardware can now take control from firmware */
   4554 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4555 			wm_get_hw_control(sc);
   4556 		break;
   4557 	default:
   4558 		break;
   4559 	}
   4560 
   4561 	/* Init hardware bits */
   4562 	wm_initialize_hardware_bits(sc);
   4563 
   4564 	/* Reset the PHY. */
   4565 	if (sc->sc_flags & WM_F_HAS_MII)
   4566 		wm_gmii_reset(sc);
   4567 
   4568 	/* Calculate (E)ITR value */
   4569 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4570 		sc->sc_itr = 450;	/* For EITR */
   4571 	} else if (sc->sc_type >= WM_T_82543) {
   4572 		/*
   4573 		 * Set up the interrupt throttling register (units of 256ns)
   4574 		 * Note that a footnote in Intel's documentation says this
   4575 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4576 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4577 		 * that that is also true for the 1024ns units of the other
   4578 		 * interrupt-related timer registers -- so, really, we ought
   4579 		 * to divide this value by 4 when the link speed is low.
   4580 		 *
   4581 		 * XXX implement this division at link speed change!
   4582 		 */
   4583 
   4584 		/*
   4585 		 * For N interrupts/sec, set this value to:
   4586 		 * 1000000000 / (N * 256).  Note that we set the
   4587 		 * absolute and packet timer values to this value
   4588 		 * divided by 4 to get "simple timer" behavior.
   4589 		 */
   4590 
   4591 		sc->sc_itr = 1500;		/* 2604 ints/sec */
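		/* 1000000000 / (1500 * 256) ~= 2604 interrupts/sec */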
   4592 	}
   4593 
   4594 	error = wm_init_txrx_queues(sc);
   4595 	if (error)
   4596 		goto out;
   4597 
   4598 	/*
   4599 	 * Clear out the VLAN table -- we don't use it (yet).
   4600 	 */
   4601 	CSR_WRITE(sc, WMREG_VET, 0);
   4602 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4603 		trynum = 10; /* Due to hw errata */
   4604 	else
   4605 		trynum = 1;
   4606 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4607 		for (j = 0; j < trynum; j++)
   4608 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4609 
   4610 	/*
   4611 	 * Set up flow-control parameters.
   4612 	 *
   4613 	 * XXX Values could probably stand some tuning.
   4614 	 */
   4615 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4616 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4617 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4618 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4619 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4620 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4621 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4622 	}
   4623 
   4624 	sc->sc_fcrtl = FCRTL_DFLT;
   4625 	if (sc->sc_type < WM_T_82543) {
   4626 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4627 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4628 	} else {
   4629 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4630 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4631 	}
   4632 
   4633 	if (sc->sc_type == WM_T_80003)
   4634 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4635 	else
   4636 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4637 
   4638 	/* Writes the control register. */
   4639 	wm_set_vlan(sc);
   4640 
   4641 	if (sc->sc_flags & WM_F_HAS_MII) {
   4642 		int val;
   4643 
   4644 		switch (sc->sc_type) {
   4645 		case WM_T_80003:
   4646 		case WM_T_ICH8:
   4647 		case WM_T_ICH9:
   4648 		case WM_T_ICH10:
   4649 		case WM_T_PCH:
   4650 		case WM_T_PCH2:
   4651 		case WM_T_PCH_LPT:
   4652 		case WM_T_PCH_SPT:
   4653 			/*
    4654 			 * Set the MAC to wait the maximum time between each
    4655 			 * iteration and increase the max iterations when
    4656 			 * polling the PHY; this fixes erroneous timeouts at
    4657 			 * 10Mbps.
   4658 			 */
   4659 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4660 			    0xFFFF);
   4661 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4662 			val |= 0x3F;
   4663 			wm_kmrn_writereg(sc,
   4664 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4665 			break;
   4666 		default:
   4667 			break;
   4668 		}
   4669 
   4670 		if (sc->sc_type == WM_T_80003) {
   4671 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4672 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4673 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4674 
   4675 			/* Bypass RX and TX FIFO's */
   4676 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4677 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4678 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4679 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4680 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4681 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4682 		}
   4683 	}
   4684 #if 0
   4685 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4686 #endif
   4687 
   4688 	/* Set up checksum offload parameters. */
   4689 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4690 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4691 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4692 		reg |= RXCSUM_IPOFL;
   4693 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4694 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4695 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4696 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4697 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4698 
   4699 	/* Set up MSI-X */
   4700 	if (sc->sc_nintrs > 1) {
   4701 		uint32_t ivar;
   4702 		struct wm_queue *wmq;
   4703 		int qid, qintr_idx;
   4704 
   4705 		if (sc->sc_type == WM_T_82575) {
   4706 			/* Interrupt control */
   4707 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4708 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4709 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4710 
   4711 			/* TX and RX */
   4712 			for (i = 0; i < sc->sc_nqueues; i++) {
   4713 				wmq = &sc->sc_queue[i];
   4714 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4715 				    EITR_TX_QUEUE(wmq->wmq_id)
   4716 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4717 			}
   4718 			/* Link status */
   4719 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4720 			    EITR_OTHER);
   4721 		} else if (sc->sc_type == WM_T_82574) {
   4722 			/* Interrupt control */
   4723 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4724 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4725 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4726 
   4727 			ivar = 0;
   4728 			/* TX and RX */
   4729 			for (i = 0; i < sc->sc_nqueues; i++) {
   4730 				wmq = &sc->sc_queue[i];
   4731 				qid = wmq->wmq_id;
   4732 				qintr_idx = wmq->wmq_intr_idx;
   4733 
   4734 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4735 				    IVAR_TX_MASK_Q_82574(qid));
   4736 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4737 				    IVAR_RX_MASK_Q_82574(qid));
   4738 			}
   4739 			/* Link status */
   4740 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4741 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4742 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4743 		} else {
   4744 			/* Interrupt control */
   4745 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4746 			    | GPIE_EIAME | GPIE_PBA);
   4747 
   4748 			switch (sc->sc_type) {
   4749 			case WM_T_82580:
   4750 			case WM_T_I350:
   4751 			case WM_T_I354:
   4752 			case WM_T_I210:
   4753 			case WM_T_I211:
   4754 				/* TX and RX */
   4755 				for (i = 0; i < sc->sc_nqueues; i++) {
   4756 					wmq = &sc->sc_queue[i];
   4757 					qid = wmq->wmq_id;
   4758 					qintr_idx = wmq->wmq_intr_idx;
   4759 
   4760 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4761 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4762 					ivar |= __SHIFTIN((qintr_idx
   4763 						| IVAR_VALID),
   4764 					    IVAR_TX_MASK_Q(qid));
   4765 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4766 					ivar |= __SHIFTIN((qintr_idx
   4767 						| IVAR_VALID),
   4768 					    IVAR_RX_MASK_Q(qid));
   4769 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4770 				}
   4771 				break;
   4772 			case WM_T_82576:
   4773 				/* TX and RX */
   4774 				for (i = 0; i < sc->sc_nqueues; i++) {
   4775 					wmq = &sc->sc_queue[i];
   4776 					qid = wmq->wmq_id;
   4777 					qintr_idx = wmq->wmq_intr_idx;
   4778 
   4779 					ivar = CSR_READ(sc,
   4780 					    WMREG_IVAR_Q_82576(qid));
   4781 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4782 					ivar |= __SHIFTIN((qintr_idx
   4783 						| IVAR_VALID),
   4784 					    IVAR_TX_MASK_Q_82576(qid));
   4785 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4786 					ivar |= __SHIFTIN((qintr_idx
   4787 						| IVAR_VALID),
   4788 					    IVAR_RX_MASK_Q_82576(qid));
   4789 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4790 					    ivar);
   4791 				}
   4792 				break;
   4793 			default:
   4794 				break;
   4795 			}
   4796 
   4797 			/* Link status */
   4798 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4799 			    IVAR_MISC_OTHER);
   4800 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4801 		}
   4802 
   4803 		if (sc->sc_nqueues > 1) {
   4804 			wm_init_rss(sc);
   4805 
    4806 			/*
    4807 			 * NOTE: Receive full-packet checksum offload
    4808 			 * is mutually exclusive with multiqueue.  However,
    4809 			 * this is not the same as the TCP/IP checksum
    4810 			 * offloads, which still work.
    4811 			 */
   4812 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4813 			reg |= RXCSUM_PCSD;
   4814 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4815 		}
   4816 	}
   4817 
   4818 	/* Set up the interrupt registers. */
   4819 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4820 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4821 	    ICR_RXO | ICR_RXT0;
   4822 	if (sc->sc_nintrs > 1) {
   4823 		uint32_t mask;
   4824 		struct wm_queue *wmq;
   4825 
   4826 		switch (sc->sc_type) {
   4827 		case WM_T_82574:
   4828 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4829 			    WMREG_EIAC_82574_MSIX_MASK);
   4830 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4831 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4832 			break;
   4833 		default:
   4834 			if (sc->sc_type == WM_T_82575) {
   4835 				mask = 0;
   4836 				for (i = 0; i < sc->sc_nqueues; i++) {
   4837 					wmq = &sc->sc_queue[i];
   4838 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   4839 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   4840 				}
   4841 				mask |= EITR_OTHER;
   4842 			} else {
   4843 				mask = 0;
   4844 				for (i = 0; i < sc->sc_nqueues; i++) {
   4845 					wmq = &sc->sc_queue[i];
   4846 					mask |= 1 << wmq->wmq_intr_idx;
   4847 				}
   4848 				mask |= 1 << sc->sc_link_intr_idx;
   4849 			}
   4850 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4851 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4852 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4853 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4854 			break;
   4855 		}
   4856 	} else
   4857 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4858 
   4859 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4860 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4861 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4862 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4863 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4864 		reg |= KABGTXD_BGSQLBIAS;
   4865 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4866 	}
   4867 
   4868 	/* Set up the inter-packet gap. */
   4869 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4870 
   4871 	if (sc->sc_type >= WM_T_82543) {
   4872 		/*
    4873 		 * XXX 82574 has both ITR and EITR.  Set EITR when we use
    4874 		 * the multiqueue function with MSI-X.
   4875 		 */
   4876 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4877 			int qidx;
   4878 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4879 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   4880 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   4881 				    sc->sc_itr);
   4882 			}
   4883 			/*
    4884 			 * Link interrupts occur much less frequently than
    4885 			 * TX and RX interrupts, so we don't tune the
    4886 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    4887 			 * FreeBSD's if_igb does.
   4888 			 */
   4889 		} else
   4890 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4891 	}
   4892 
   4893 	/* Set the VLAN ethernetype. */
   4894 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4895 
   4896 	/*
   4897 	 * Set up the transmit control register; we start out with
    4898 	 * a collision distance suitable for FDX, but update it when
   4899 	 * we resolve the media type.
   4900 	 */
   4901 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4902 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4903 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4904 	if (sc->sc_type >= WM_T_82571)
   4905 		sc->sc_tctl |= TCTL_MULR;
   4906 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4907 
   4908 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4909 		/* Write TDT after TCTL.EN is set. See the document. */
   4910 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4911 	}
   4912 
   4913 	if (sc->sc_type == WM_T_80003) {
   4914 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4915 		reg &= ~TCTL_EXT_GCEX_MASK;
   4916 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4917 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4918 	}
   4919 
   4920 	/* Set the media. */
   4921 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4922 		goto out;
   4923 
   4924 	/* Configure for OS presence */
   4925 	wm_init_manageability(sc);
   4926 
   4927 	/*
   4928 	 * Set up the receive control register; we actually program
   4929 	 * the register when we set the receive filter.  Use multicast
   4930 	 * address offset type 0.
   4931 	 *
   4932 	 * Only the i82544 has the ability to strip the incoming
   4933 	 * CRC, so we don't enable that feature.
   4934 	 */
   4935 	sc->sc_mchash_type = 0;
   4936 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4937 	    | RCTL_MO(sc->sc_mchash_type);
   4938 
   4939 	/*
   4940 	 * The I350 has a bug where it always strips the CRC whether
    4941 	 * asked to or not.  So ask for stripped CRC here and cope in rxeof.
   4942 	 */
   4943 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4944 	    || (sc->sc_type == WM_T_I210))
   4945 		sc->sc_rctl |= RCTL_SECRC;
   4946 
   4947 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4948 	    && (ifp->if_mtu > ETHERMTU)) {
   4949 		sc->sc_rctl |= RCTL_LPE;
   4950 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4951 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4952 	}
   4953 
   4954 	if (MCLBYTES == 2048) {
   4955 		sc->sc_rctl |= RCTL_2k;
   4956 	} else {
   4957 		if (sc->sc_type >= WM_T_82543) {
   4958 			switch (MCLBYTES) {
   4959 			case 4096:
   4960 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4961 				break;
   4962 			case 8192:
   4963 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4964 				break;
   4965 			case 16384:
   4966 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4967 				break;
   4968 			default:
   4969 				panic("wm_init: MCLBYTES %d unsupported",
   4970 				    MCLBYTES);
   4971 				break;
   4972 			}
   4973 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   4974 	}
   4975 
   4976 	/* Set the receive filter. */
   4977 	wm_set_filter(sc);
   4978 
   4979 	/* Enable ECC */
   4980 	switch (sc->sc_type) {
   4981 	case WM_T_82571:
   4982 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4983 		reg |= PBA_ECC_CORR_EN;
   4984 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4985 		break;
   4986 	case WM_T_PCH_LPT:
   4987 	case WM_T_PCH_SPT:
   4988 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4989 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4990 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4991 
   4992 		reg = CSR_READ(sc, WMREG_CTRL);
   4993 		reg |= CTRL_MEHE;
   4994 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4995 		break;
   4996 	default:
   4997 		break;
   4998 	}
   4999 
    5000 	/* On 82575 and later, set RDT only if RX is enabled. */
   5001 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5002 		int qidx;
   5003 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5004 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5005 			for (i = 0; i < WM_NRXDESC; i++) {
   5006 				mutex_enter(rxq->rxq_lock);
   5007 				wm_init_rxdesc(rxq, i);
   5008 				mutex_exit(rxq->rxq_lock);
   5010 			}
   5011 		}
   5012 	}
   5013 
   5014 	sc->sc_stopping = false;
   5015 
   5016 	/* Start the one second link check clock. */
   5017 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5018 
   5019 	/* ...all done! */
   5020 	ifp->if_flags |= IFF_RUNNING;
   5021 	ifp->if_flags &= ~IFF_OACTIVE;
   5022 
   5023  out:
   5024 	sc->sc_if_flags = ifp->if_flags;
   5025 	if (error)
   5026 		log(LOG_ERR, "%s: interface not running\n",
   5027 		    device_xname(sc->sc_dev));
   5028 	return error;
   5029 }
   5030 
   5031 /*
   5032  * wm_stop:		[ifnet interface function]
   5033  *
   5034  *	Stop transmission on the interface.
   5035  */
   5036 static void
   5037 wm_stop(struct ifnet *ifp, int disable)
   5038 {
   5039 	struct wm_softc *sc = ifp->if_softc;
   5040 
   5041 	WM_CORE_LOCK(sc);
   5042 	wm_stop_locked(ifp, disable);
   5043 	WM_CORE_UNLOCK(sc);
   5044 }
   5045 
   5046 static void
   5047 wm_stop_locked(struct ifnet *ifp, int disable)
   5048 {
   5049 	struct wm_softc *sc = ifp->if_softc;
   5050 	struct wm_txsoft *txs;
   5051 	int i, qidx;
   5052 
   5053 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5054 		device_xname(sc->sc_dev), __func__));
   5055 	KASSERT(WM_CORE_LOCKED(sc));
   5056 
   5057 	sc->sc_stopping = true;
   5058 
   5059 	/* Stop the one second clock. */
   5060 	callout_stop(&sc->sc_tick_ch);
   5061 
   5062 	/* Stop the 82547 Tx FIFO stall check timer. */
   5063 	if (sc->sc_type == WM_T_82547)
   5064 		callout_stop(&sc->sc_txfifo_ch);
   5065 
   5066 	if (sc->sc_flags & WM_F_HAS_MII) {
   5067 		/* Down the MII. */
   5068 		mii_down(&sc->sc_mii);
   5069 	} else {
   5070 #if 0
   5071 		/* Should we clear PHY's status properly? */
   5072 		wm_reset(sc);
   5073 #endif
   5074 	}
   5075 
   5076 	/* Stop the transmit and receive processes. */
   5077 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5078 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5079 	sc->sc_rctl &= ~RCTL_EN;
   5080 
   5081 	/*
   5082 	 * Clear the interrupt mask to ensure the device cannot assert its
   5083 	 * interrupt line.
   5084 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5085 	 * service any currently pending or shared interrupt.
   5086 	 */
   5087 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5088 	sc->sc_icr = 0;
   5089 	if (sc->sc_nintrs > 1) {
   5090 		if (sc->sc_type != WM_T_82574) {
   5091 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5092 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5093 		} else
   5094 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5095 	}
   5096 
   5097 	/* Release any queued transmit buffers. */
   5098 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5099 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5100 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5101 		mutex_enter(txq->txq_lock);
   5102 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5103 			txs = &txq->txq_soft[i];
   5104 			if (txs->txs_mbuf != NULL) {
    5105 				bus_dmamap_unload(sc->sc_dmat,
         				    txs->txs_dmamap);
   5106 				m_freem(txs->txs_mbuf);
   5107 				txs->txs_mbuf = NULL;
   5108 			}
   5109 		}
   5110 		if (sc->sc_type == WM_T_PCH_SPT) {
   5111 			pcireg_t preg;
   5112 			uint32_t reg;
   5113 			int nexttx;
   5114 
   5115 			/* First, disable MULR fix in FEXTNVM11 */
   5116 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5117 			reg |= FEXTNVM11_DIS_MULRFIX;
   5118 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5119 
   5120 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5121 			    WM_PCI_DESCRING_STATUS);
   5122 			reg = CSR_READ(sc, WMREG_TDLEN(0));
   5123 			printf("XXX RST: FLUSH = %08x, len = %u\n",
   5124 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
   5125 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
   5126 			    && (reg != 0)) {
   5127 				/* TX */
   5128 				printf("XXX need TX flush (reg = %08x)\n",
   5129 				    preg);
   5130 				wm_init_tx_descs(sc, txq);
   5131 				wm_init_tx_regs(sc, wmq, txq);
   5132 				nexttx = txq->txq_next;
   5133 				wm_set_dma_addr(
   5134 					&txq->txq_descs[nexttx].wtx_addr,
   5135 					WM_CDTXADDR(txq, nexttx));
   5136 				txq->txq_descs[nexttx].wtx_cmdlen
   5137 				    = htole32(WTX_CMD_IFCS | 512);
   5138 				wm_cdtxsync(txq, nexttx, 1,
    5139 				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5140 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5141 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5142 				CSR_WRITE_FLUSH(sc);
   5143 				delay(250);
   5144 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5145 			}
   5146 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5147 			    WM_PCI_DESCRING_STATUS);
   5148 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5149 				/* RX */
   5150 				printf("XXX need RX flush\n");
   5151 			}
   5152 		}
   5153 		mutex_exit(txq->txq_lock);
   5154 	}
   5155 
   5156 	/* Mark the interface as down and cancel the watchdog timer. */
   5157 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5158 	ifp->if_timer = 0;
   5159 
   5160 	if (disable) {
   5161 		for (i = 0; i < sc->sc_nqueues; i++) {
   5162 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5163 			mutex_enter(rxq->rxq_lock);
   5164 			wm_rxdrain(rxq);
   5165 			mutex_exit(rxq->rxq_lock);
   5166 		}
   5167 	}
   5168 
   5169 #if 0 /* notyet */
   5170 	if (sc->sc_type >= WM_T_82544)
   5171 		CSR_WRITE(sc, WMREG_WUC, 0);
   5172 #endif
   5173 }
   5174 
   5175 static void
   5176 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5177 {
   5178 	struct mbuf *m;
   5179 	int i;
   5180 
   5181 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5182 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5183 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5184 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5185 		    m->m_data, m->m_len, m->m_flags);
   5186 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5187 	    i, i == 1 ? "" : "s");
   5188 }
   5189 
   5190 /*
   5191  * wm_82547_txfifo_stall:
   5192  *
   5193  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5194  *	reset the FIFO pointers, and restart packet transmission.
   5195  */
   5196 static void
   5197 wm_82547_txfifo_stall(void *arg)
   5198 {
   5199 	struct wm_softc *sc = arg;
   5200 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5201 
   5202 	mutex_enter(txq->txq_lock);
   5203 
   5204 	if (sc->sc_stopping)
   5205 		goto out;
   5206 
   5207 	if (txq->txq_fifo_stall) {
   5208 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5209 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5210 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5211 			/*
   5212 			 * Packets have drained.  Stop transmitter, reset
   5213 			 * FIFO pointers, restart transmitter, and kick
   5214 			 * the packet queue.
   5215 			 */
   5216 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5217 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5218 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5219 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5220 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5221 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5222 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5223 			CSR_WRITE_FLUSH(sc);
   5224 
   5225 			txq->txq_fifo_head = 0;
   5226 			txq->txq_fifo_stall = 0;
   5227 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5228 		} else {
   5229 			/*
   5230 			 * Still waiting for packets to drain; try again in
   5231 			 * another tick.
   5232 			 */
   5233 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5234 		}
   5235 	}
   5236 
   5237 out:
   5238 	mutex_exit(txq->txq_lock);
   5239 }
   5240 
   5241 /*
   5242  * wm_82547_txfifo_bugchk:
   5243  *
   5244  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5245  *	prevent enqueueing a packet that would wrap around the end
    5246  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5247  *
   5248  *	We do this by checking the amount of space before the end
   5249  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5250  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5251  *	the internal FIFO pointers to the beginning, and restart
   5252  *	transmission on the interface.
   5253  */
   5254 #define	WM_FIFO_HDR		0x10
   5255 #define	WM_82547_PAD_LEN	0x3e0
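         /*
          * Worked example (hypothetical numbers): with an 8KB Tx FIFO and
          * txq_fifo_head at 0x1f00, only space = 0x2000 - 0x1f00 = 0x100
          * bytes remain before the wrap.  A 1514-byte frame rounds up to
          * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600; since
          * 0x600 >= WM_82547_PAD_LEN + 0x100 = 0x4e0, the queue stalls
          * until the FIFO drains and its pointers are reset.
          */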
   5256 static int
   5257 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5258 {
   5259 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5260 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5261 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5262 
   5263 	/* Just return if already stalled. */
   5264 	if (txq->txq_fifo_stall)
   5265 		return 1;
   5266 
   5267 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5268 		/* Stall only occurs in half-duplex mode. */
   5269 		goto send_packet;
   5270 	}
   5271 
   5272 	if (len >= WM_82547_PAD_LEN + space) {
   5273 		txq->txq_fifo_stall = 1;
   5274 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5275 		return 1;
   5276 	}
   5277 
   5278  send_packet:
   5279 	txq->txq_fifo_head += len;
   5280 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5281 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5282 
   5283 	return 0;
   5284 }
   5285 
   5286 static int
   5287 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5288 {
   5289 	int error;
   5290 
   5291 	/*
   5292 	 * Allocate the control data structures, and create and load the
   5293 	 * DMA map for it.
   5294 	 *
   5295 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5296 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5297 	 * both sets within the same 4G segment.
   5298 	 */
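         	/*
         	 * The (bus_size_t)0x100000000ULL boundary argument passed to
         	 * bus_dmamem_alloc() below is what keeps an allocation from
         	 * crossing a 4G boundary.
         	 */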
   5299 	if (sc->sc_type < WM_T_82544)
   5300 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5301 	else
   5302 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5303 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5304 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5305 	else
   5306 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5307 
   5308 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5309 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5310 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5311 		aprint_error_dev(sc->sc_dev,
   5312 		    "unable to allocate TX control data, error = %d\n",
   5313 		    error);
   5314 		goto fail_0;
   5315 	}
   5316 
   5317 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5318 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5319 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5320 		aprint_error_dev(sc->sc_dev,
   5321 		    "unable to map TX control data, error = %d\n", error);
   5322 		goto fail_1;
   5323 	}
   5324 
   5325 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5326 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5327 		aprint_error_dev(sc->sc_dev,
   5328 		    "unable to create TX control data DMA map, error = %d\n",
   5329 		    error);
   5330 		goto fail_2;
   5331 	}
   5332 
   5333 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5334 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5335 		aprint_error_dev(sc->sc_dev,
   5336 		    "unable to load TX control data DMA map, error = %d\n",
   5337 		    error);
   5338 		goto fail_3;
   5339 	}
   5340 
   5341 	return 0;
   5342 
   5343  fail_3:
   5344 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5345  fail_2:
   5346 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5347 	    WM_TXDESCS_SIZE(txq));
   5348  fail_1:
   5349 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5350  fail_0:
   5351 	return error;
   5352 }
   5353 
   5354 static void
   5355 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5356 {
   5357 
   5358 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5359 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5360 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5361 	    WM_TXDESCS_SIZE(txq));
   5362 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5363 }
   5364 
   5365 static int
   5366 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5367 {
   5368 	int error;
   5369 
   5370 	/*
   5371 	 * Allocate the control data structures, and create and load the
   5372 	 * DMA map for it.
   5373 	 *
   5374 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5375 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5376 	 * both sets within the same 4G segment.
   5377 	 */
   5378 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5379 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5380 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5381 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5382 		aprint_error_dev(sc->sc_dev,
   5383 		    "unable to allocate RX control data, error = %d\n",
   5384 		    error);
   5385 		goto fail_0;
   5386 	}
   5387 
   5388 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5389 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5390 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5391 		aprint_error_dev(sc->sc_dev,
   5392 		    "unable to map RX control data, error = %d\n", error);
   5393 		goto fail_1;
   5394 	}
   5395 
   5396 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5397 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5398 		aprint_error_dev(sc->sc_dev,
   5399 		    "unable to create RX control data DMA map, error = %d\n",
   5400 		    error);
   5401 		goto fail_2;
   5402 	}
   5403 
   5404 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5405 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5406 		aprint_error_dev(sc->sc_dev,
   5407 		    "unable to load RX control data DMA map, error = %d\n",
   5408 		    error);
   5409 		goto fail_3;
   5410 	}
   5411 
   5412 	return 0;
   5413 
   5414  fail_3:
   5415 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5416  fail_2:
   5417 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5418 	    rxq->rxq_desc_size);
   5419  fail_1:
   5420 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5421  fail_0:
   5422 	return error;
   5423 }
   5424 
   5425 static void
   5426 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5427 {
   5428 
   5429 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5430 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5431 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5432 	    rxq->rxq_desc_size);
   5433 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5434 }
   5435 
   5436 
   5437 static int
   5438 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5439 {
   5440 	int i, error;
   5441 
   5442 	/* Create the transmit buffer DMA maps. */
   5443 	WM_TXQUEUELEN(txq) =
   5444 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5445 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5446 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5447 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5448 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5449 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5450 			aprint_error_dev(sc->sc_dev,
   5451 			    "unable to create Tx DMA map %d, error = %d\n",
   5452 			    i, error);
   5453 			goto fail;
   5454 		}
   5455 	}
   5456 
   5457 	return 0;
   5458 
   5459  fail:
   5460 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5461 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5462 			bus_dmamap_destroy(sc->sc_dmat,
   5463 			    txq->txq_soft[i].txs_dmamap);
   5464 	}
   5465 	return error;
   5466 }
   5467 
   5468 static void
   5469 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5470 {
   5471 	int i;
   5472 
   5473 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5474 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5475 			bus_dmamap_destroy(sc->sc_dmat,
   5476 			    txq->txq_soft[i].txs_dmamap);
   5477 	}
   5478 }
   5479 
   5480 static int
   5481 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5482 {
   5483 	int i, error;
   5484 
   5485 	/* Create the receive buffer DMA maps. */
   5486 	for (i = 0; i < WM_NRXDESC; i++) {
   5487 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5488 			    MCLBYTES, 0, 0,
   5489 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5490 			aprint_error_dev(sc->sc_dev,
   5491 			    "unable to create Rx DMA map %d error = %d\n",
   5492 			    i, error);
   5493 			goto fail;
   5494 		}
   5495 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5496 	}
   5497 
   5498 	return 0;
   5499 
   5500  fail:
   5501 	for (i = 0; i < WM_NRXDESC; i++) {
   5502 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5503 			bus_dmamap_destroy(sc->sc_dmat,
   5504 			    rxq->rxq_soft[i].rxs_dmamap);
   5505 	}
   5506 	return error;
   5507 }
   5508 
   5509 static void
   5510 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5511 {
   5512 	int i;
   5513 
   5514 	for (i = 0; i < WM_NRXDESC; i++) {
   5515 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5516 			bus_dmamap_destroy(sc->sc_dmat,
   5517 			    rxq->rxq_soft[i].rxs_dmamap);
   5518 	}
   5519 }
   5520 
   5521 /*
    5522  * wm_alloc_txrx_queues:
   5523  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5524  */
   5525 static int
   5526 wm_alloc_txrx_queues(struct wm_softc *sc)
   5527 {
   5528 	int i, error, tx_done, rx_done;
   5529 
   5530 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5531 	    KM_SLEEP);
   5532 	if (sc->sc_queue == NULL) {
    5533 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5534 		error = ENOMEM;
   5535 		goto fail_0;
   5536 	}
   5537 
   5538 	/*
   5539 	 * For transmission
   5540 	 */
   5541 	error = 0;
   5542 	tx_done = 0;
   5543 	for (i = 0; i < sc->sc_nqueues; i++) {
   5544 #ifdef WM_EVENT_COUNTERS
   5545 		int j;
   5546 		const char *xname;
   5547 #endif
   5548 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5549 		txq->txq_sc = sc;
   5550 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5551 
   5552 		error = wm_alloc_tx_descs(sc, txq);
   5553 		if (error)
   5554 			break;
   5555 		error = wm_alloc_tx_buffer(sc, txq);
   5556 		if (error) {
   5557 			wm_free_tx_descs(sc, txq);
   5558 			break;
   5559 		}
   5560 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5561 		if (txq->txq_interq == NULL) {
   5562 			wm_free_tx_descs(sc, txq);
   5563 			wm_free_tx_buffer(sc, txq);
   5564 			error = ENOMEM;
   5565 			break;
   5566 		}
   5567 
   5568 #ifdef WM_EVENT_COUNTERS
   5569 		xname = device_xname(sc->sc_dev);
   5570 
   5571 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5572 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5573 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5574 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5575 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5576 
   5577 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5578 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5579 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5580 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5581 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5582 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5583 
   5584 		for (j = 0; j < WM_NTXSEGS; j++) {
   5585 			snprintf(txq->txq_txseg_evcnt_names[j],
    5586 			    sizeof(txq->txq_txseg_evcnt_names[j]),
         			    "txq%02dtxseg%d", i, j);
    5587 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
         			    EVCNT_TYPE_MISC, NULL, xname,
    5588 			    txq->txq_txseg_evcnt_names[j]);
   5589 		}
   5590 
   5591 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5592 
   5593 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5594 #endif /* WM_EVENT_COUNTERS */
   5595 
   5596 		tx_done++;
   5597 	}
   5598 	if (error)
   5599 		goto fail_1;
   5600 
   5601 	/*
    5602 	 * For receive
   5603 	 */
   5604 	error = 0;
   5605 	rx_done = 0;
   5606 	for (i = 0; i < sc->sc_nqueues; i++) {
   5607 #ifdef WM_EVENT_COUNTERS
   5608 		const char *xname;
   5609 #endif
   5610 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5611 		rxq->rxq_sc = sc;
   5612 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5613 
   5614 		error = wm_alloc_rx_descs(sc, rxq);
   5615 		if (error)
   5616 			break;
   5617 
   5618 		error = wm_alloc_rx_buffer(sc, rxq);
   5619 		if (error) {
   5620 			wm_free_rx_descs(sc, rxq);
   5621 			break;
   5622 		}
   5623 
   5624 #ifdef WM_EVENT_COUNTERS
   5625 		xname = device_xname(sc->sc_dev);
   5626 
   5627 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5628 
   5629 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5630 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5631 #endif /* WM_EVENT_COUNTERS */
   5632 
   5633 		rx_done++;
   5634 	}
   5635 	if (error)
   5636 		goto fail_2;
   5637 
   5638 	return 0;
   5639 
   5640  fail_2:
   5641 	for (i = 0; i < rx_done; i++) {
   5642 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5643 		wm_free_rx_buffer(sc, rxq);
   5644 		wm_free_rx_descs(sc, rxq);
   5645 		if (rxq->rxq_lock)
   5646 			mutex_obj_free(rxq->rxq_lock);
   5647 	}
   5648  fail_1:
   5649 	for (i = 0; i < tx_done; i++) {
   5650 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5651 		pcq_destroy(txq->txq_interq);
   5652 		wm_free_tx_buffer(sc, txq);
   5653 		wm_free_tx_descs(sc, txq);
   5654 		if (txq->txq_lock)
   5655 			mutex_obj_free(txq->txq_lock);
   5656 	}
   5657 
   5658 	kmem_free(sc->sc_queue,
   5659 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5660  fail_0:
   5661 	return error;
   5662 }
   5663 
   5664 /*
    5665  * wm_free_txrx_queues:
   5666  *	Free {tx,rx}descs and {tx,rx} buffers
   5667  */
   5668 static void
   5669 wm_free_txrx_queues(struct wm_softc *sc)
   5670 {
   5671 	int i;
   5672 
   5673 	for (i = 0; i < sc->sc_nqueues; i++) {
   5674 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5675 		wm_free_rx_buffer(sc, rxq);
   5676 		wm_free_rx_descs(sc, rxq);
   5677 		if (rxq->rxq_lock)
   5678 			mutex_obj_free(rxq->rxq_lock);
   5679 	}
   5680 
   5681 	for (i = 0; i < sc->sc_nqueues; i++) {
   5682 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5683 		wm_free_tx_buffer(sc, txq);
   5684 		wm_free_tx_descs(sc, txq);
   5685 		if (txq->txq_lock)
   5686 			mutex_obj_free(txq->txq_lock);
   5687 	}
   5688 
   5689 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5690 }
   5691 
   5692 static void
   5693 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5694 {
   5695 
   5696 	KASSERT(mutex_owned(txq->txq_lock));
   5697 
   5698 	/* Initialize the transmit descriptor ring. */
   5699 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5700 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5701 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5702 	txq->txq_free = WM_NTXDESC(txq);
   5703 	txq->txq_next = 0;
   5704 }
   5705 
   5706 static void
   5707 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5708     struct wm_txqueue *txq)
   5709 {
   5710 
   5711 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5712 		device_xname(sc->sc_dev), __func__));
   5713 	KASSERT(mutex_owned(txq->txq_lock));
   5714 
   5715 	if (sc->sc_type < WM_T_82543) {
   5716 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5717 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5718 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5719 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5720 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5721 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5722 	} else {
   5723 		int qid = wmq->wmq_id;
   5724 
   5725 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5726 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5727 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5728 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5729 
   5730 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5731 			/*
   5732 			 * Don't write TDT before TCTL.EN is set.
   5733 			 * See the document.
   5734 			 */
   5735 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5736 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5737 			    | TXDCTL_WTHRESH(0));
   5738 		else {
   5739 			/* ITR / 4 */
   5740 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5741 			if (sc->sc_type >= WM_T_82540) {
   5742 				/* should be same */
   5743 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5744 			}
   5745 
   5746 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5747 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5748 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5749 		}
   5750 	}
   5751 }
   5752 
   5753 static void
   5754 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5755 {
   5756 	int i;
   5757 
   5758 	KASSERT(mutex_owned(txq->txq_lock));
   5759 
   5760 	/* Initialize the transmit job descriptors. */
   5761 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5762 		txq->txq_soft[i].txs_mbuf = NULL;
   5763 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5764 	txq->txq_snext = 0;
   5765 	txq->txq_sdirty = 0;
   5766 }
   5767 
   5768 static void
   5769 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5770     struct wm_txqueue *txq)
   5771 {
   5772 
   5773 	KASSERT(mutex_owned(txq->txq_lock));
   5774 
   5775 	/*
   5776 	 * Set up some register offsets that are different between
   5777 	 * the i82542 and the i82543 and later chips.
   5778 	 */
   5779 	if (sc->sc_type < WM_T_82543)
   5780 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5781 	else
   5782 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5783 
   5784 	wm_init_tx_descs(sc, txq);
   5785 	wm_init_tx_regs(sc, wmq, txq);
   5786 	wm_init_tx_buffer(sc, txq);
   5787 }
   5788 
   5789 static void
   5790 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5791     struct wm_rxqueue *rxq)
   5792 {
   5793 
   5794 	KASSERT(mutex_owned(rxq->rxq_lock));
   5795 
   5796 	/*
   5797 	 * Initialize the receive descriptor and receive job
   5798 	 * descriptor rings.
   5799 	 */
   5800 	if (sc->sc_type < WM_T_82543) {
   5801 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5802 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5803 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5804 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5805 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5806 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5807 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5808 
   5809 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5810 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5811 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5812 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5813 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5814 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5815 	} else {
   5816 		int qid = wmq->wmq_id;
   5817 
   5818 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5819 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5820 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5821 
   5822 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5823 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5824 				panic("%s: MCLBYTES %d unsupported for "
         				    "i82575 or higher\n", __func__, MCLBYTES);
   5825 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5826 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
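         			/*
         			 * The BSIZEPKT field counts in 1KB units
         			 * (assuming SRRCTL_BSIZEPKT_SHIFT is 10), so
         			 * standard 2KB clusters program a buffer size
         			 * of 2048 >> 10 = 2.
         			 */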
   5827 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5828 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5829 			    | RXDCTL_WTHRESH(1));
   5830 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5831 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5832 		} else {
   5833 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5834 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5835 			/* ITR / 4 */
   5836 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   5837 			/* MUST be same */
   5838 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5839 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5840 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5841 		}
   5842 	}
   5843 }
   5844 
   5845 static int
   5846 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5847 {
   5848 	struct wm_rxsoft *rxs;
   5849 	int error, i;
   5850 
   5851 	KASSERT(mutex_owned(rxq->rxq_lock));
   5852 
   5853 	for (i = 0; i < WM_NRXDESC; i++) {
   5854 		rxs = &rxq->rxq_soft[i];
   5855 		if (rxs->rxs_mbuf == NULL) {
   5856 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5857 				log(LOG_ERR, "%s: unable to allocate or map "
   5858 				    "rx buffer %d, error = %d\n",
   5859 				    device_xname(sc->sc_dev), i, error);
   5860 				/*
   5861 				 * XXX Should attempt to run with fewer receive
   5862 				 * XXX buffers instead of just failing.
   5863 				 */
   5864 				wm_rxdrain(rxq);
   5865 				return ENOMEM;
   5866 			}
   5867 		} else {
   5868 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5869 				wm_init_rxdesc(rxq, i);
   5870 			/*
    5871 			 * For 82575 and newer devices, the RX descriptors
    5872 			 * must be initialized after the setting of RCTL.EN in
    5873 			 * wm_set_filter().
   5874 			 */
   5875 		}
   5876 	}
   5877 	rxq->rxq_ptr = 0;
   5878 	rxq->rxq_discard = 0;
   5879 	WM_RXCHAIN_RESET(rxq);
   5880 
   5881 	return 0;
   5882 }
   5883 
   5884 static int
   5885 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5886     struct wm_rxqueue *rxq)
   5887 {
   5888 
   5889 	KASSERT(mutex_owned(rxq->rxq_lock));
   5890 
   5891 	/*
   5892 	 * Set up some register offsets that are different between
   5893 	 * the i82542 and the i82543 and later chips.
   5894 	 */
   5895 	if (sc->sc_type < WM_T_82543)
   5896 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5897 	else
   5898 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   5899 
   5900 	wm_init_rx_regs(sc, wmq, rxq);
   5901 	return wm_init_rx_buffer(sc, rxq);
   5902 }
   5903 
   5904 /*
    5905  * wm_init_txrx_queues:
   5906  *	Initialize {tx,rx}descs and {tx,rx} buffers
   5907  */
   5908 static int
   5909 wm_init_txrx_queues(struct wm_softc *sc)
   5910 {
   5911 	int i, error = 0;
   5912 
   5913 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5914 		device_xname(sc->sc_dev), __func__));
   5915 
   5916 	for (i = 0; i < sc->sc_nqueues; i++) {
   5917 		struct wm_queue *wmq = &sc->sc_queue[i];
   5918 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5919 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5920 
   5921 		mutex_enter(txq->txq_lock);
   5922 		wm_init_tx_queue(sc, wmq, txq);
   5923 		mutex_exit(txq->txq_lock);
   5924 
   5925 		mutex_enter(rxq->rxq_lock);
   5926 		error = wm_init_rx_queue(sc, wmq, rxq);
   5927 		mutex_exit(rxq->rxq_lock);
   5928 		if (error)
   5929 			break;
   5930 	}
   5931 
   5932 	return error;
   5933 }
   5934 
   5935 /*
   5936  * wm_tx_offload:
   5937  *
   5938  *	Set up TCP/IP checksumming parameters for the
   5939  *	specified packet.
   5940  */
   5941 static int
   5942 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5943     uint8_t *fieldsp)
   5944 {
   5945 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5946 	struct mbuf *m0 = txs->txs_mbuf;
   5947 	struct livengood_tcpip_ctxdesc *t;
   5948 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5949 	uint32_t ipcse;
   5950 	struct ether_header *eh;
   5951 	int offset, iphl;
   5952 	uint8_t fields;
   5953 
   5954 	/*
   5955 	 * XXX It would be nice if the mbuf pkthdr had offset
   5956 	 * fields for the protocol headers.
   5957 	 */
   5958 
   5959 	eh = mtod(m0, struct ether_header *);
   5960 	switch (htons(eh->ether_type)) {
   5961 	case ETHERTYPE_IP:
   5962 	case ETHERTYPE_IPV6:
   5963 		offset = ETHER_HDR_LEN;
   5964 		break;
   5965 
   5966 	case ETHERTYPE_VLAN:
   5967 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5968 		break;
   5969 
   5970 	default:
   5971 		/*
   5972 		 * Don't support this protocol or encapsulation.
   5973 		 */
   5974 		*fieldsp = 0;
   5975 		*cmdp = 0;
   5976 		return 0;
   5977 	}
   5978 
   5979 	if ((m0->m_pkthdr.csum_flags &
   5980 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   5981 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5982 	} else {
   5983 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5984 	}
   5985 	ipcse = offset + iphl - 1;
   5986 
   5987 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   5988 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   5989 	seg = 0;
   5990 	fields = 0;
   5991 
   5992 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5993 		int hlen = offset + iphl;
   5994 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5995 
   5996 		if (__predict_false(m0->m_len <
   5997 				    (hlen + sizeof(struct tcphdr)))) {
   5998 			/*
   5999 			 * TCP/IP headers are not in the first mbuf; we need
   6000 			 * to do this the slow and painful way.  Let's just
   6001 			 * hope this doesn't happen very often.
   6002 			 */
   6003 			struct tcphdr th;
   6004 
   6005 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6006 
   6007 			m_copydata(m0, hlen, sizeof(th), &th);
   6008 			if (v4) {
   6009 				struct ip ip;
   6010 
   6011 				m_copydata(m0, offset, sizeof(ip), &ip);
   6012 				ip.ip_len = 0;
   6013 				m_copyback(m0,
   6014 				    offset + offsetof(struct ip, ip_len),
   6015 				    sizeof(ip.ip_len), &ip.ip_len);
   6016 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6017 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6018 			} else {
   6019 				struct ip6_hdr ip6;
   6020 
   6021 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6022 				ip6.ip6_plen = 0;
   6023 				m_copyback(m0,
   6024 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6025 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6026 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6027 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6028 			}
   6029 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6030 			    sizeof(th.th_sum), &th.th_sum);
   6031 
   6032 			hlen += th.th_off << 2;
   6033 		} else {
   6034 			/*
   6035 			 * TCP/IP headers are in the first mbuf; we can do
   6036 			 * this the easy way.
   6037 			 */
   6038 			struct tcphdr *th;
   6039 
   6040 			if (v4) {
   6041 				struct ip *ip =
   6042 				    (void *)(mtod(m0, char *) + offset);
   6043 				th = (void *)(mtod(m0, char *) + hlen);
   6044 
   6045 				ip->ip_len = 0;
   6046 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6047 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6048 			} else {
   6049 				struct ip6_hdr *ip6 =
   6050 				    (void *)(mtod(m0, char *) + offset);
   6051 				th = (void *)(mtod(m0, char *) + hlen);
   6052 
   6053 				ip6->ip6_plen = 0;
   6054 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6055 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6056 			}
   6057 			hlen += th->th_off << 2;
   6058 		}
   6059 
   6060 		if (v4) {
   6061 			WM_Q_EVCNT_INCR(txq, txtso);
   6062 			cmdlen |= WTX_TCPIP_CMD_IP;
   6063 		} else {
   6064 			WM_Q_EVCNT_INCR(txq, txtso6);
   6065 			ipcse = 0;
   6066 		}
   6067 		cmd |= WTX_TCPIP_CMD_TSE;
   6068 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6069 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6070 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6071 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6072 	}
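         	/*
         	 * TSO example (hypothetical packet): an IPv4 TCP chunk with a
         	 * 14-byte Ethernet header, 20-byte IP header and 20-byte TCP
         	 * header gives hlen = 54, so cmdlen carries a payload length
         	 * of m_pkthdr.len - 54 and, for a 1448-byte MSS, seg becomes
         	 * WTX_TCPIP_SEG_HDRLEN(54) | WTX_TCPIP_SEG_MSS(1448).
         	 */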
   6073 
   6074 	/*
   6075 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6076 	 * offload feature, if we load the context descriptor, we
   6077 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6078 	 */
   6079 
   6080 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6081 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6082 	    WTX_TCPIP_IPCSE(ipcse);
   6083 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6084 		WM_Q_EVCNT_INCR(txq, txipsum);
   6085 		fields |= WTX_IXSM;
   6086 	}
   6087 
   6088 	offset += iphl;
   6089 
   6090 	if (m0->m_pkthdr.csum_flags &
   6091 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6092 		WM_Q_EVCNT_INCR(txq, txtusum);
   6093 		fields |= WTX_TXSM;
   6094 		tucs = WTX_TCPIP_TUCSS(offset) |
   6095 		    WTX_TCPIP_TUCSO(offset +
   6096 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6097 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6098 	} else if ((m0->m_pkthdr.csum_flags &
   6099 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6100 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6101 		fields |= WTX_TXSM;
   6102 		tucs = WTX_TCPIP_TUCSS(offset) |
   6103 		    WTX_TCPIP_TUCSO(offset +
   6104 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6105 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6106 	} else {
   6107 		/* Just initialize it to a valid TCP context. */
   6108 		tucs = WTX_TCPIP_TUCSS(offset) |
   6109 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6110 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6111 	}
   6112 
   6113 	/* Fill in the context descriptor. */
   6114 	t = (struct livengood_tcpip_ctxdesc *)
   6115 	    &txq->txq_descs[txq->txq_next];
   6116 	t->tcpip_ipcs = htole32(ipcs);
   6117 	t->tcpip_tucs = htole32(tucs);
   6118 	t->tcpip_cmdlen = htole32(cmdlen);
   6119 	t->tcpip_seg = htole32(seg);
   6120 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6121 
   6122 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6123 	txs->txs_ndesc++;
   6124 
   6125 	*cmdp = cmd;
   6126 	*fieldsp = fields;
   6127 
   6128 	return 0;
   6129 }
   6130 
   6131 /*
   6132  * wm_start:		[ifnet interface function]
   6133  *
   6134  *	Start packet transmission on the interface.
   6135  */
   6136 static void
   6137 wm_start(struct ifnet *ifp)
   6138 {
   6139 	struct wm_softc *sc = ifp->if_softc;
   6140 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6141 
   6142 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6143 
   6144 	mutex_enter(txq->txq_lock);
   6145 	if (!sc->sc_stopping)
   6146 		wm_start_locked(ifp);
   6147 	mutex_exit(txq->txq_lock);
   6148 }
   6149 
   6150 static void
   6151 wm_start_locked(struct ifnet *ifp)
   6152 {
   6153 	struct wm_softc *sc = ifp->if_softc;
   6154 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6155 	struct mbuf *m0;
   6156 	struct m_tag *mtag;
   6157 	struct wm_txsoft *txs;
   6158 	bus_dmamap_t dmamap;
   6159 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6160 	bus_addr_t curaddr;
   6161 	bus_size_t seglen, curlen;
   6162 	uint32_t cksumcmd;
   6163 	uint8_t cksumfields;
   6164 
   6165 	KASSERT(mutex_owned(txq->txq_lock));
   6166 
   6167 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6168 		return;
   6169 
   6170 	/* Remember the previous number of free descriptors. */
   6171 	ofree = txq->txq_free;
   6172 
   6173 	/*
   6174 	 * Loop through the send queue, setting up transmit descriptors
   6175 	 * until we drain the queue, or use up all available transmit
   6176 	 * descriptors.
   6177 	 */
   6178 	for (;;) {
   6179 		m0 = NULL;
   6180 
   6181 		/* Get a work queue entry. */
   6182 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6183 			wm_txeof(sc, txq);
   6184 			if (txq->txq_sfree == 0) {
   6185 				DPRINTF(WM_DEBUG_TX,
   6186 				    ("%s: TX: no free job descriptors\n",
   6187 					device_xname(sc->sc_dev)));
   6188 				WM_Q_EVCNT_INCR(txq, txsstall);
   6189 				break;
   6190 			}
   6191 		}
   6192 
   6193 		/* Grab a packet off the queue. */
   6194 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6195 		if (m0 == NULL)
   6196 			break;
   6197 
   6198 		DPRINTF(WM_DEBUG_TX,
   6199 		    ("%s: TX: have packet to transmit: %p\n",
   6200 		    device_xname(sc->sc_dev), m0));
   6201 
   6202 		txs = &txq->txq_soft[txq->txq_snext];
   6203 		dmamap = txs->txs_dmamap;
   6204 
   6205 		use_tso = (m0->m_pkthdr.csum_flags &
   6206 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6207 
   6208 		/*
   6209 		 * So says the Linux driver:
   6210 		 * The controller does a simple calculation to make sure
   6211 		 * there is enough room in the FIFO before initiating the
   6212 		 * DMA for each buffer.  The calc is:
   6213 		 *	4 = ceil(buffer len / MSS)
   6214 		 * To make sure we don't overrun the FIFO, adjust the max
   6215 		 * buffer len if the MSS drops.
   6216 		 */
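         		/*
         		 * Example: with a 1448-byte MSS, each DMA segment is
         		 * capped at 1448 << 2 = 5792 bytes whenever that is
         		 * below WTX_MAX_LEN.
         		 */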
   6217 		dmamap->dm_maxsegsz =
   6218 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6219 		    ? m0->m_pkthdr.segsz << 2
   6220 		    : WTX_MAX_LEN;
   6221 
   6222 		/*
   6223 		 * Load the DMA map.  If this fails, the packet either
   6224 		 * didn't fit in the allotted number of segments, or we
   6225 		 * were short on resources.  For the too-many-segments
   6226 		 * case, we simply report an error and drop the packet,
   6227 		 * since we can't sanely copy a jumbo packet to a single
   6228 		 * buffer.
   6229 		 */
   6230 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6231 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6232 		if (error) {
   6233 			if (error == EFBIG) {
   6234 				WM_Q_EVCNT_INCR(txq, txdrop);
   6235 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6236 				    "DMA segments, dropping...\n",
   6237 				    device_xname(sc->sc_dev));
   6238 				wm_dump_mbuf_chain(sc, m0);
   6239 				m_freem(m0);
   6240 				continue;
   6241 			}
   6242 			/*  Short on resources, just stop for now. */
   6243 			DPRINTF(WM_DEBUG_TX,
   6244 			    ("%s: TX: dmamap load failed: %d\n",
   6245 			    device_xname(sc->sc_dev), error));
   6246 			break;
   6247 		}
   6248 
   6249 		segs_needed = dmamap->dm_nsegs;
   6250 		if (use_tso) {
   6251 			/* For sentinel descriptor; see below. */
   6252 			segs_needed++;
   6253 		}
   6254 
   6255 		/*
   6256 		 * Ensure we have enough descriptors free to describe
   6257 		 * the packet.  Note, we always reserve one descriptor
   6258 		 * at the end of the ring due to the semantics of the
   6259 		 * TDT register, plus one more in the event we need
   6260 		 * to load offload context.
   6261 		 */
   6262 		if (segs_needed > txq->txq_free - 2) {
   6263 			/*
   6264 			 * Not enough free descriptors to transmit this
   6265 			 * packet.  We haven't committed anything yet,
   6266 			 * so just unload the DMA map, put the packet
   6267 			 * pack on the queue, and punt.  Notify the upper
    6268 			 * back on the queue, and punt.  Notify the upper
   6269 			 */
   6270 			DPRINTF(WM_DEBUG_TX,
   6271 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6272 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6273 			    segs_needed, txq->txq_free - 1));
   6274 			ifp->if_flags |= IFF_OACTIVE;
   6275 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6276 			WM_Q_EVCNT_INCR(txq, txdstall);
   6277 			break;
   6278 		}
   6279 
   6280 		/*
   6281 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6282 		 * once we know we can transmit the packet, since we
   6283 		 * do some internal FIFO space accounting here.
   6284 		 */
   6285 		if (sc->sc_type == WM_T_82547 &&
   6286 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6287 			DPRINTF(WM_DEBUG_TX,
   6288 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6289 			    device_xname(sc->sc_dev)));
   6290 			ifp->if_flags |= IFF_OACTIVE;
   6291 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6292 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6293 			break;
   6294 		}
   6295 
   6296 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6297 
   6298 		DPRINTF(WM_DEBUG_TX,
   6299 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6300 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6301 
   6302 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6303 
   6304 		/*
   6305 		 * Store a pointer to the packet so that we can free it
   6306 		 * later.
   6307 		 *
   6308 		 * Initially, we consider the number of descriptors the
   6309 		 * packet uses the number of DMA segments.  This may be
   6310 		 * incremented by 1 if we do checksum offload (a descriptor
   6311 		 * is used to set the checksum context).
   6312 		 */
   6313 		txs->txs_mbuf = m0;
   6314 		txs->txs_firstdesc = txq->txq_next;
   6315 		txs->txs_ndesc = segs_needed;
   6316 
   6317 		/* Set up offload parameters for this packet. */
   6318 		if (m0->m_pkthdr.csum_flags &
   6319 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6320 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6321 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6322 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6323 					  &cksumfields) != 0) {
   6324 				/* Error message already displayed. */
   6325 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6326 				continue;
   6327 			}
   6328 		} else {
   6329 			cksumcmd = 0;
   6330 			cksumfields = 0;
   6331 		}
   6332 
   6333 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
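         		/*
         		 * IDE delays the completion interrupt for this packet;
         		 * IFCS has the chip append the Ethernet FCS.
         		 */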
   6334 
   6335 		/* Sync the DMA map. */
   6336 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6337 		    BUS_DMASYNC_PREWRITE);
   6338 
   6339 		/* Initialize the transmit descriptor. */
   6340 		for (nexttx = txq->txq_next, seg = 0;
   6341 		     seg < dmamap->dm_nsegs; seg++) {
   6342 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6343 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6344 			     seglen != 0;
   6345 			     curaddr += curlen, seglen -= curlen,
   6346 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6347 				curlen = seglen;
   6348 
   6349 				/*
   6350 				 * So says the Linux driver:
   6351 				 * Work around for premature descriptor
   6352 				 * write-backs in TSO mode.  Append a
   6353 				 * 4-byte sentinel descriptor.
   6354 				 */
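         				/*
         				 * Trimming 4 bytes here leaves 4 bytes
         				 * of the segment for the loop's next
         				 * pass, which then emits the 4-byte
         				 * sentinel descriptor counted in
         				 * segs_needed above.
         				 */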
   6355 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6356 				    curlen > 8)
   6357 					curlen -= 4;
   6358 
   6359 				wm_set_dma_addr(
   6360 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6361 				txq->txq_descs[nexttx].wtx_cmdlen
   6362 				    = htole32(cksumcmd | curlen);
   6363 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6364 				    = 0;
   6365 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6366 				    = cksumfields;
    6367 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6368 				lasttx = nexttx;
   6369 
   6370 				DPRINTF(WM_DEBUG_TX,
   6371 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6372 				     "len %#04zx\n",
   6373 				    device_xname(sc->sc_dev), nexttx,
   6374 				    (uint64_t)curaddr, curlen));
   6375 			}
   6376 		}
   6377 
   6378 		KASSERT(lasttx != -1);
   6379 
   6380 		/*
   6381 		 * Set up the command byte on the last descriptor of
   6382 		 * the packet.  If we're in the interrupt delay window,
   6383 		 * delay the interrupt.
   6384 		 */
   6385 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6386 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6387 
   6388 		/*
   6389 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6390 		 * up the descriptor to encapsulate the packet for us.
   6391 		 *
   6392 		 * This is only valid on the last descriptor of the packet.
   6393 		 */
   6394 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6395 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6396 			    htole32(WTX_CMD_VLE);
   6397 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6398 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6399 		}
   6400 
   6401 		txs->txs_lastdesc = lasttx;
   6402 
   6403 		DPRINTF(WM_DEBUG_TX,
   6404 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6405 		    device_xname(sc->sc_dev),
   6406 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6407 
   6408 		/* Sync the descriptors we're using. */
   6409 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6410 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6411 
   6412 		/* Give the packet to the chip. */
   6413 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6414 
   6415 		DPRINTF(WM_DEBUG_TX,
   6416 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6417 
   6418 		DPRINTF(WM_DEBUG_TX,
   6419 		    ("%s: TX: finished transmitting packet, job %d\n",
   6420 		    device_xname(sc->sc_dev), txq->txq_snext));
   6421 
   6422 		/* Advance the tx pointer. */
   6423 		txq->txq_free -= txs->txs_ndesc;
   6424 		txq->txq_next = nexttx;
   6425 
   6426 		txq->txq_sfree--;
   6427 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6428 
   6429 		/* Pass the packet to any BPF listeners. */
   6430 		bpf_mtap(ifp, m0);
   6431 	}
   6432 
   6433 	if (m0 != NULL) {
   6434 		ifp->if_flags |= IFF_OACTIVE;
   6435 		WM_Q_EVCNT_INCR(txq, txdrop);
   6436 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6437 			__func__));
   6438 		m_freem(m0);
   6439 	}
   6440 
   6441 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6442 		/* No more slots; notify upper layer. */
   6443 		ifp->if_flags |= IFF_OACTIVE;
   6444 	}
   6445 
   6446 	if (txq->txq_free != ofree) {
   6447 		/* Set a watchdog timer in case the chip flakes out. */
   6448 		ifp->if_timer = 5;
   6449 	}
   6450 }
   6451 
   6452 /*
   6453  * wm_nq_tx_offload:
   6454  *
   6455  *	Set up TCP/IP checksumming parameters for the
   6456  *	specified packet, for NEWQUEUE devices
   6457  */
   6458 static int
   6459 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6460     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6461 {
   6462 	struct mbuf *m0 = txs->txs_mbuf;
   6463 	struct m_tag *mtag;
   6464 	uint32_t vl_len, mssidx, cmdc;
   6465 	struct ether_header *eh;
   6466 	int offset, iphl;
   6467 
   6468 	/*
   6469 	 * XXX It would be nice if the mbuf pkthdr had offset
   6470 	 * fields for the protocol headers.
   6471 	 */
   6472 	*cmdlenp = 0;
   6473 	*fieldsp = 0;
   6474 
   6475 	eh = mtod(m0, struct ether_header *);
   6476 	switch (htons(eh->ether_type)) {
   6477 	case ETHERTYPE_IP:
   6478 	case ETHERTYPE_IPV6:
   6479 		offset = ETHER_HDR_LEN;
   6480 		break;
   6481 
   6482 	case ETHERTYPE_VLAN:
   6483 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6484 		break;
   6485 
   6486 	default:
   6487 		/* Don't support this protocol or encapsulation. */
   6488 		*do_csum = false;
   6489 		return 0;
   6490 	}
   6491 	*do_csum = true;
   6492 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6493 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6494 
   6495 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6496 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6497 
   6498 	if ((m0->m_pkthdr.csum_flags &
   6499 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6500 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6501 	} else {
   6502 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6503 	}
   6504 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6505 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6506 
   6507 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6508 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6509 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6510 		*cmdlenp |= NQTX_CMD_VLE;
   6511 	}
   6512 
   6513 	mssidx = 0;
   6514 
   6515 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6516 		int hlen = offset + iphl;
   6517 		int tcp_hlen;
   6518 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6519 
   6520 		if (__predict_false(m0->m_len <
   6521 				    (hlen + sizeof(struct tcphdr)))) {
   6522 			/*
   6523 			 * TCP/IP headers are not in the first mbuf; we need
   6524 			 * to do this the slow and painful way.  Let's just
   6525 			 * hope this doesn't happen very often.
   6526 			 */
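         			/*
         			 * As in the in-mbuf case below: zero the IP
         			 * length field and seed th_sum with the
         			 * pseudo-header sum over addresses and protocol
         			 * only; the hardware fills in the per-segment
         			 * lengths when it splits the send.
         			 */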
   6527 			struct tcphdr th;
   6528 
   6529 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6530 
   6531 			m_copydata(m0, hlen, sizeof(th), &th);
   6532 			if (v4) {
   6533 				struct ip ip;
   6534 
   6535 				m_copydata(m0, offset, sizeof(ip), &ip);
   6536 				ip.ip_len = 0;
   6537 				m_copyback(m0,
   6538 				    offset + offsetof(struct ip, ip_len),
   6539 				    sizeof(ip.ip_len), &ip.ip_len);
   6540 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6541 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6542 			} else {
   6543 				struct ip6_hdr ip6;
   6544 
   6545 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6546 				ip6.ip6_plen = 0;
   6547 				m_copyback(m0,
   6548 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6549 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6550 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6551 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6552 			}
   6553 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6554 			    sizeof(th.th_sum), &th.th_sum);
   6555 
   6556 			tcp_hlen = th.th_off << 2;
   6557 		} else {
   6558 			/*
   6559 			 * TCP/IP headers are in the first mbuf; we can do
   6560 			 * this the easy way.
   6561 			 */
   6562 			struct tcphdr *th;
   6563 
   6564 			if (v4) {
   6565 				struct ip *ip =
   6566 				    (void *)(mtod(m0, char *) + offset);
   6567 				th = (void *)(mtod(m0, char *) + hlen);
   6568 
   6569 				ip->ip_len = 0;
   6570 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6571 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6572 			} else {
   6573 				struct ip6_hdr *ip6 =
   6574 				    (void *)(mtod(m0, char *) + offset);
   6575 				th = (void *)(mtod(m0, char *) + hlen);
   6576 
   6577 				ip6->ip6_plen = 0;
   6578 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6579 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6580 			}
   6581 			tcp_hlen = th->th_off << 2;
   6582 		}
   6583 		hlen += tcp_hlen;
   6584 		*cmdlenp |= NQTX_CMD_TSE;
   6585 
   6586 		if (v4) {
   6587 			WM_Q_EVCNT_INCR(txq, txtso);
   6588 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6589 		} else {
   6590 			WM_Q_EVCNT_INCR(txq, txtso6);
   6591 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6592 		}
   6593 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6594 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6595 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6596 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6597 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6598 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
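         		/*
         		 * Example: with a 14-byte Ethernet header, a 20-byte IP
         		 * header and a 20-byte TCP header, hlen is 54, so PAYLEN
         		 * is m_pkthdr.len - 54 and MSS is the per-segment
         		 * payload size supplied by the stack in m_pkthdr.segsz.
         		 */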
   6599 	} else {
   6600 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6601 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6602 	}
   6603 
   6604 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6605 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6606 		cmdc |= NQTXC_CMD_IP4;
   6607 	}
   6608 
   6609 	if (m0->m_pkthdr.csum_flags &
   6610 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6611 		WM_Q_EVCNT_INCR(txq, txtusum);
   6612 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6613 			cmdc |= NQTXC_CMD_TCP;
   6614 		} else {
   6615 			cmdc |= NQTXC_CMD_UDP;
   6616 		}
   6617 		cmdc |= NQTXC_CMD_IP4;
   6618 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6619 	}
   6620 	if (m0->m_pkthdr.csum_flags &
   6621 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6622 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6623 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6624 			cmdc |= NQTXC_CMD_TCP;
   6625 		} else {
   6626 			cmdc |= NQTXC_CMD_UDP;
   6627 		}
   6628 		cmdc |= NQTXC_CMD_IP6;
   6629 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6630 	}
   6631 
   6632 	/* Fill in the context descriptor. */
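         	/*
         	 * It carries everything the hardware needs for checksum
         	 * insertion and TSO: vl_len packs the VLAN tag and the MAC/IP
         	 * header lengths, the cmd word selects IPv4/IPv6 and TCP/UDP,
         	 * and mssidx packs the MSS and L4 header length.
         	 */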
   6633 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6634 	    htole32(vl_len);
   6635 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6636 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6637 	    htole32(cmdc);
   6638 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6639 	    htole32(mssidx);
   6640 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6641 	DPRINTF(WM_DEBUG_TX,
   6642 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6643 	    txq->txq_next, 0, vl_len));
   6644 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6645 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6646 	txs->txs_ndesc++;
   6647 	return 0;
   6648 }
   6649 
   6650 /*
   6651  * wm_nq_start:		[ifnet interface function]
   6652  *
   6653  *	Start packet transmission on the interface for NEWQUEUE devices
   6654  */
   6655 static void
   6656 wm_nq_start(struct ifnet *ifp)
   6657 {
   6658 	struct wm_softc *sc = ifp->if_softc;
   6659 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6660 
   6661 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6662 
   6663 	mutex_enter(txq->txq_lock);
   6664 	if (!sc->sc_stopping)
   6665 		wm_nq_start_locked(ifp);
   6666 	mutex_exit(txq->txq_lock);
   6667 }
   6668 
   6669 static void
   6670 wm_nq_start_locked(struct ifnet *ifp)
   6671 {
   6672 	struct wm_softc *sc = ifp->if_softc;
   6673 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6674 
   6675 	wm_nq_send_common_locked(ifp, txq, false);
   6676 }
   6677 
   6678 static inline int
   6679 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6680 {
   6681 	struct wm_softc *sc = ifp->if_softc;
   6682 	u_int cpuid = cpu_index(curcpu());
   6683 
   6684 	/*
    6685 	 * Currently a simple per-CPU distribution strategy.
    6686 	 * TODO:
    6687 	 * distribute by flow ID (RSS hash value).
   6688 	 */
   6689 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6690 }
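
         /*
          * A flow-based distribution (the TODO above) could look roughly
          * like the following sketch, assuming the stack tagged each mbuf
          * with an RSS hash value ("rsshash" here is hypothetical; NetBSD
          * does not provide such a field at this point):
          *
          *	return rsshash % sc->sc_nqueues;
          *
          * keeping all packets of a flow on one queue and thus in order.
          */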
   6691 
   6692 static int
   6693 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6694 {
   6695 	int qid;
   6696 	struct wm_softc *sc = ifp->if_softc;
   6697 	struct wm_txqueue *txq;
   6698 
   6699 	qid = wm_nq_select_txqueue(ifp, m);
   6700 	txq = &sc->sc_queue[qid].wmq_txq;
   6701 
   6702 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6703 		m_freem(m);
   6704 		WM_Q_EVCNT_INCR(txq, txdrop);
   6705 		return ENOBUFS;
   6706 	}
   6707 
   6708 	if (mutex_tryenter(txq->txq_lock)) {
   6709 		/* XXXX should be per TX queue */
   6710 		ifp->if_obytes += m->m_pkthdr.len;
   6711 		if (m->m_flags & M_MCAST)
   6712 			ifp->if_omcasts++;
   6713 
   6714 		if (!sc->sc_stopping)
   6715 			wm_nq_transmit_locked(ifp, txq);
   6716 		mutex_exit(txq->txq_lock);
   6717 	}
   6718 
   6719 	return 0;
   6720 }
   6721 
   6722 static void
   6723 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6724 {
   6725 
   6726 	wm_nq_send_common_locked(ifp, txq, true);
   6727 }
   6728 
   6729 static void
   6730 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6731     bool is_transmit)
   6732 {
   6733 	struct wm_softc *sc = ifp->if_softc;
   6734 	struct mbuf *m0;
   6735 	struct m_tag *mtag;
   6736 	struct wm_txsoft *txs;
   6737 	bus_dmamap_t dmamap;
   6738 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6739 	bool do_csum, sent;
   6740 
   6741 	KASSERT(mutex_owned(txq->txq_lock));
   6742 
   6743 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6744 		return;
   6745 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6746 		return;
   6747 
   6748 	sent = false;
   6749 
   6750 	/*
   6751 	 * Loop through the send queue, setting up transmit descriptors
   6752 	 * until we drain the queue, or use up all available transmit
   6753 	 * descriptors.
   6754 	 */
   6755 	for (;;) {
   6756 		m0 = NULL;
   6757 
   6758 		/* Get a work queue entry. */
   6759 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6760 			wm_txeof(sc, txq);
   6761 			if (txq->txq_sfree == 0) {
   6762 				DPRINTF(WM_DEBUG_TX,
   6763 				    ("%s: TX: no free job descriptors\n",
   6764 					device_xname(sc->sc_dev)));
   6765 				WM_Q_EVCNT_INCR(txq, txsstall);
   6766 				break;
   6767 			}
   6768 		}
   6769 
   6770 		/* Grab a packet off the queue. */
   6771 		if (is_transmit)
   6772 			m0 = pcq_get(txq->txq_interq);
   6773 		else
   6774 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6775 		if (m0 == NULL)
   6776 			break;
   6777 
   6778 		DPRINTF(WM_DEBUG_TX,
   6779 		    ("%s: TX: have packet to transmit: %p\n",
   6780 		    device_xname(sc->sc_dev), m0));
   6781 
   6782 		txs = &txq->txq_soft[txq->txq_snext];
   6783 		dmamap = txs->txs_dmamap;
   6784 
   6785 		/*
   6786 		 * Load the DMA map.  If this fails, the packet either
   6787 		 * didn't fit in the allotted number of segments, or we
   6788 		 * were short on resources.  For the too-many-segments
   6789 		 * case, we simply report an error and drop the packet,
   6790 		 * since we can't sanely copy a jumbo packet to a single
   6791 		 * buffer.
   6792 		 */
   6793 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6794 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6795 		if (error) {
   6796 			if (error == EFBIG) {
   6797 				WM_Q_EVCNT_INCR(txq, txdrop);
   6798 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6799 				    "DMA segments, dropping...\n",
   6800 				    device_xname(sc->sc_dev));
   6801 				wm_dump_mbuf_chain(sc, m0);
   6802 				m_freem(m0);
   6803 				continue;
   6804 			}
   6805 			/* Short on resources, just stop for now. */
   6806 			DPRINTF(WM_DEBUG_TX,
   6807 			    ("%s: TX: dmamap load failed: %d\n",
   6808 			    device_xname(sc->sc_dev), error));
   6809 			break;
   6810 		}
   6811 
   6812 		segs_needed = dmamap->dm_nsegs;
   6813 
   6814 		/*
   6815 		 * Ensure we have enough descriptors free to describe
   6816 		 * the packet.  Note, we always reserve one descriptor
   6817 		 * at the end of the ring due to the semantics of the
   6818 		 * TDT register, plus one more in the event we need
   6819 		 * to load offload context.
   6820 		 */
   6821 		if (segs_needed > txq->txq_free - 2) {
   6822 			/*
   6823 			 * Not enough free descriptors to transmit this
   6824 			 * packet.  We haven't committed anything yet,
   6825 			 * so just unload the DMA map, put the packet
    6826 			 * back on the queue, and punt.  Notify the upper
   6827 			 * layer that there are no more slots left.
   6828 			 */
   6829 			DPRINTF(WM_DEBUG_TX,
   6830 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6831 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6832 			    segs_needed, txq->txq_free - 1));
   6833 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6834 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6835 			WM_Q_EVCNT_INCR(txq, txdstall);
   6836 			break;
   6837 		}
   6838 
   6839 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6840 
   6841 		DPRINTF(WM_DEBUG_TX,
   6842 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6843 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6844 
   6845 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6846 
   6847 		/*
   6848 		 * Store a pointer to the packet so that we can free it
   6849 		 * later.
   6850 		 *
   6851 		 * Initially, we consider the number of descriptors the
    6852 		 * packet uses to be the number of DMA segments.  This may be
   6853 		 * incremented by 1 if we do checksum offload (a descriptor
   6854 		 * is used to set the checksum context).
   6855 		 */
   6856 		txs->txs_mbuf = m0;
   6857 		txs->txs_firstdesc = txq->txq_next;
   6858 		txs->txs_ndesc = segs_needed;
   6859 
   6860 		/* Set up offload parameters for this packet. */
   6861 		uint32_t cmdlen, fields, dcmdlen;
   6862 		if (m0->m_pkthdr.csum_flags &
   6863 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6864 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6865 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6866 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   6867 			    &do_csum) != 0) {
   6868 				/* Error message already displayed. */
   6869 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6870 				continue;
   6871 			}
   6872 		} else {
   6873 			do_csum = false;
   6874 			cmdlen = 0;
   6875 			fields = 0;
   6876 		}
   6877 
   6878 		/* Sync the DMA map. */
   6879 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6880 		    BUS_DMASYNC_PREWRITE);
   6881 
   6882 		/* Initialize the first transmit descriptor. */
   6883 		nexttx = txq->txq_next;
   6884 		if (!do_csum) {
    6885 			/* Set up a legacy descriptor. */
   6886 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   6887 			    dmamap->dm_segs[0].ds_addr);
   6888 			txq->txq_descs[nexttx].wtx_cmdlen =
   6889 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6890 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6891 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6892 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6893 			    NULL) {
   6894 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6895 				    htole32(WTX_CMD_VLE);
   6896 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6897 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6898 			} else {
    6899 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6900 			}
   6901 			dcmdlen = 0;
   6902 		} else {
    6903 			/* Set up an advanced data descriptor. */
   6904 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6905 			    htole64(dmamap->dm_segs[0].ds_addr);
   6906 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6907 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6908 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6909 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6910 			    htole32(fields);
   6911 			DPRINTF(WM_DEBUG_TX,
   6912 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6913 			    device_xname(sc->sc_dev), nexttx,
   6914 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6915 			DPRINTF(WM_DEBUG_TX,
   6916 			    ("\t 0x%08x%08x\n", fields,
   6917 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6918 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6919 		}
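         		/*
         		 * dcmdlen now holds the descriptor-type bits each of
         		 * the remaining segment descriptors must repeat: zero
         		 * for the legacy format, NQTX_DTYP_D | NQTX_CMD_DEXT
         		 * for the advanced format.
         		 */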
   6920 
   6921 		lasttx = nexttx;
   6922 		nexttx = WM_NEXTTX(txq, nexttx);
   6923 		/*
    6924 		 * Fill in the next descriptors.  The legacy and advanced
    6925 		 * formats are the same here.
   6926 		 */
   6927 		for (seg = 1; seg < dmamap->dm_nsegs;
   6928 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6929 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6930 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6931 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6932 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6933 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6934 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6935 			lasttx = nexttx;
   6936 
   6937 			DPRINTF(WM_DEBUG_TX,
   6938 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6939 			     "len %#04zx\n",
   6940 			    device_xname(sc->sc_dev), nexttx,
   6941 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6942 			    dmamap->dm_segs[seg].ds_len));
   6943 		}
   6944 
   6945 		KASSERT(lasttx != -1);
   6946 
   6947 		/*
   6948 		 * Set up the command byte on the last descriptor of
   6949 		 * the packet.  If we're in the interrupt delay window,
   6950 		 * delay the interrupt.
   6951 		 */
   6952 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6953 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6954 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6955 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6956 
   6957 		txs->txs_lastdesc = lasttx;
   6958 
   6959 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6960 		    device_xname(sc->sc_dev),
   6961 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6962 
   6963 		/* Sync the descriptors we're using. */
   6964 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6965 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6966 
   6967 		/* Give the packet to the chip. */
   6968 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6969 		sent = true;
   6970 
   6971 		DPRINTF(WM_DEBUG_TX,
   6972 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6973 
   6974 		DPRINTF(WM_DEBUG_TX,
   6975 		    ("%s: TX: finished transmitting packet, job %d\n",
   6976 		    device_xname(sc->sc_dev), txq->txq_snext));
   6977 
   6978 		/* Advance the tx pointer. */
   6979 		txq->txq_free -= txs->txs_ndesc;
   6980 		txq->txq_next = nexttx;
   6981 
   6982 		txq->txq_sfree--;
   6983 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6984 
   6985 		/* Pass the packet to any BPF listeners. */
   6986 		bpf_mtap(ifp, m0);
   6987 	}
   6988 
   6989 	if (m0 != NULL) {
   6990 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6991 		WM_Q_EVCNT_INCR(txq, txdrop);
   6992 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6993 			__func__));
   6994 		m_freem(m0);
   6995 	}
   6996 
   6997 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6998 		/* No more slots; notify upper layer. */
   6999 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7000 	}
   7001 
   7002 	if (sent) {
   7003 		/* Set a watchdog timer in case the chip flakes out. */
   7004 		ifp->if_timer = 5;
   7005 	}
   7006 }
   7007 
   7008 /* Interrupt */
   7009 
   7010 /*
   7011  * wm_txeof:
   7012  *
   7013  *	Helper; handle transmit interrupts.
   7014  */
   7015 static int
   7016 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7017 {
   7018 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7019 	struct wm_txsoft *txs;
   7020 	bool processed = false;
   7021 	int count = 0;
   7022 	int i;
   7023 	uint8_t status;
   7024 
   7025 	KASSERT(mutex_owned(txq->txq_lock));
   7026 
   7027 	if (sc->sc_stopping)
   7028 		return 0;
   7029 
   7030 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7031 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7032 	else
   7033 		ifp->if_flags &= ~IFF_OACTIVE;
   7034 
   7035 	/*
   7036 	 * Go through the Tx list and free mbufs for those
   7037 	 * frames which have been transmitted.
   7038 	 */
   7039 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7040 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7041 		txs = &txq->txq_soft[i];
   7042 
   7043 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7044 			device_xname(sc->sc_dev), i));
   7045 
   7046 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7047 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7048 
   7049 		status =
   7050 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7051 		if ((status & WTX_ST_DD) == 0) {
   7052 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7053 			    BUS_DMASYNC_PREREAD);
   7054 			break;
   7055 		}
   7056 
   7057 		processed = true;
   7058 		count++;
   7059 		DPRINTF(WM_DEBUG_TX,
   7060 		    ("%s: TX: job %d done: descs %d..%d\n",
   7061 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7062 		    txs->txs_lastdesc));
   7063 
   7064 		/*
   7065 		 * XXX We should probably be using the statistics
   7066 		 * XXX registers, but I don't know if they exist
   7067 		 * XXX on chips before the i82544.
   7068 		 */
   7069 
   7070 #ifdef WM_EVENT_COUNTERS
   7071 		if (status & WTX_ST_TU)
   7072 			WM_Q_EVCNT_INCR(txq, tu);
   7073 #endif /* WM_EVENT_COUNTERS */
   7074 
   7075 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7076 			ifp->if_oerrors++;
   7077 			if (status & WTX_ST_LC)
   7078 				log(LOG_WARNING, "%s: late collision\n",
   7079 				    device_xname(sc->sc_dev));
   7080 			else if (status & WTX_ST_EC) {
   7081 				ifp->if_collisions += 16;
   7082 				log(LOG_WARNING, "%s: excessive collisions\n",
   7083 				    device_xname(sc->sc_dev));
   7084 			}
   7085 		} else
   7086 			ifp->if_opackets++;
   7087 
   7088 		txq->txq_free += txs->txs_ndesc;
   7089 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7090 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7091 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7092 		m_freem(txs->txs_mbuf);
   7093 		txs->txs_mbuf = NULL;
   7094 	}
   7095 
   7096 	/* Update the dirty transmit buffer pointer. */
   7097 	txq->txq_sdirty = i;
   7098 	DPRINTF(WM_DEBUG_TX,
   7099 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7100 
   7101 	if (count != 0)
   7102 		rnd_add_uint32(&sc->rnd_source, count);
   7103 
   7104 	/*
   7105 	 * If there are no more pending transmissions, cancel the watchdog
   7106 	 * timer.
   7107 	 */
   7108 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7109 		ifp->if_timer = 0;
   7110 
   7111 	return processed;
   7112 }
   7113 
   7114 /*
   7115  * wm_rxeof:
   7116  *
   7117  *	Helper; handle receive interrupts.
   7118  */
   7119 static void
   7120 wm_rxeof(struct wm_rxqueue *rxq)
   7121 {
   7122 	struct wm_softc *sc = rxq->rxq_sc;
   7123 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7124 	struct wm_rxsoft *rxs;
   7125 	struct mbuf *m;
   7126 	int i, len;
   7127 	int count = 0;
   7128 	uint8_t status, errors;
   7129 	uint16_t vlantag;
   7130 
   7131 	KASSERT(mutex_owned(rxq->rxq_lock));
   7132 
   7133 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7134 		rxs = &rxq->rxq_soft[i];
   7135 
   7136 		DPRINTF(WM_DEBUG_RX,
   7137 		    ("%s: RX: checking descriptor %d\n",
   7138 		    device_xname(sc->sc_dev), i));
   7139 
    7140 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7141 
   7142 		status = rxq->rxq_descs[i].wrx_status;
   7143 		errors = rxq->rxq_descs[i].wrx_errors;
   7144 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7145 		vlantag = rxq->rxq_descs[i].wrx_special;
   7146 
   7147 		if ((status & WRX_ST_DD) == 0) {
   7148 			/* We have processed all of the receive descriptors. */
   7149 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7150 			break;
   7151 		}
   7152 
   7153 		count++;
   7154 		if (__predict_false(rxq->rxq_discard)) {
   7155 			DPRINTF(WM_DEBUG_RX,
   7156 			    ("%s: RX: discarding contents of descriptor %d\n",
   7157 			    device_xname(sc->sc_dev), i));
   7158 			wm_init_rxdesc(rxq, i);
   7159 			if (status & WRX_ST_EOP) {
   7160 				/* Reset our state. */
   7161 				DPRINTF(WM_DEBUG_RX,
   7162 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7163 				    device_xname(sc->sc_dev)));
   7164 				rxq->rxq_discard = 0;
   7165 			}
   7166 			continue;
   7167 		}
   7168 
   7169 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7170 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7171 
   7172 		m = rxs->rxs_mbuf;
   7173 
   7174 		/*
   7175 		 * Add a new receive buffer to the ring, unless of
   7176 		 * course the length is zero. Treat the latter as a
   7177 		 * failed mapping.
   7178 		 */
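         		/*
         		 * On success wm_add_rxbuf() has loaded a fresh buffer
         		 * into this slot, and the mbuf saved in m above now
         		 * belongs to the packet chain; on failure the old
         		 * buffer is recycled in place below.
         		 */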
   7179 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7180 			/*
   7181 			 * Failed, throw away what we've done so
   7182 			 * far, and discard the rest of the packet.
   7183 			 */
   7184 			ifp->if_ierrors++;
   7185 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7186 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7187 			wm_init_rxdesc(rxq, i);
   7188 			if ((status & WRX_ST_EOP) == 0)
   7189 				rxq->rxq_discard = 1;
   7190 			if (rxq->rxq_head != NULL)
   7191 				m_freem(rxq->rxq_head);
   7192 			WM_RXCHAIN_RESET(rxq);
   7193 			DPRINTF(WM_DEBUG_RX,
   7194 			    ("%s: RX: Rx buffer allocation failed, "
   7195 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7196 			    rxq->rxq_discard ? " (discard)" : ""));
   7197 			continue;
   7198 		}
   7199 
   7200 		m->m_len = len;
   7201 		rxq->rxq_len += len;
   7202 		DPRINTF(WM_DEBUG_RX,
   7203 		    ("%s: RX: buffer at %p len %d\n",
   7204 		    device_xname(sc->sc_dev), m->m_data, len));
   7205 
   7206 		/* If this is not the end of the packet, keep looking. */
   7207 		if ((status & WRX_ST_EOP) == 0) {
   7208 			WM_RXCHAIN_LINK(rxq, m);
   7209 			DPRINTF(WM_DEBUG_RX,
   7210 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7211 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7212 			continue;
   7213 		}
   7214 
    7215 		/*
    7216 		 * Okay, we have the entire packet now.  The chip is
    7217 		 * configured to include the FCS except on I350/I354 and
    7218 		 * I21[01] (not all chips can be configured to strip it),
    7219 		 * so we need to trim it.  We may need to adjust the
    7220 		 * length of the previous mbuf in the chain if the
    7221 		 * current mbuf is too short.  Due to an erratum, the
    7222 		 * RCTL_SECRC bit in the RCTL register is always set on
    7223 		 * I350, so we don't trim the FCS there.
    7224 		 */
   7225 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7226 		    && (sc->sc_type != WM_T_I210)
   7227 		    && (sc->sc_type != WM_T_I211)) {
   7228 			if (m->m_len < ETHER_CRC_LEN) {
   7229 				rxq->rxq_tail->m_len
   7230 				    -= (ETHER_CRC_LEN - m->m_len);
   7231 				m->m_len = 0;
   7232 			} else
   7233 				m->m_len -= ETHER_CRC_LEN;
   7234 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7235 		} else
   7236 			len = rxq->rxq_len;
   7237 
   7238 		WM_RXCHAIN_LINK(rxq, m);
   7239 
   7240 		*rxq->rxq_tailp = NULL;
   7241 		m = rxq->rxq_head;
   7242 
   7243 		WM_RXCHAIN_RESET(rxq);
   7244 
   7245 		DPRINTF(WM_DEBUG_RX,
   7246 		    ("%s: RX: have entire packet, len -> %d\n",
   7247 		    device_xname(sc->sc_dev), len));
   7248 
   7249 		/* If an error occurred, update stats and drop the packet. */
   7250 		if (errors &
   7251 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7252 			if (errors & WRX_ER_SE)
   7253 				log(LOG_WARNING, "%s: symbol error\n",
   7254 				    device_xname(sc->sc_dev));
   7255 			else if (errors & WRX_ER_SEQ)
   7256 				log(LOG_WARNING, "%s: receive sequence error\n",
   7257 				    device_xname(sc->sc_dev));
   7258 			else if (errors & WRX_ER_CE)
   7259 				log(LOG_WARNING, "%s: CRC error\n",
   7260 				    device_xname(sc->sc_dev));
   7261 			m_freem(m);
   7262 			continue;
   7263 		}
   7264 
   7265 		/* No errors.  Receive the packet. */
   7266 		m_set_rcvif(m, ifp);
   7267 		m->m_pkthdr.len = len;
   7268 
   7269 		/*
   7270 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7271 		 * for us.  Associate the tag with the packet.
   7272 		 */
   7273 		/* XXXX should check for i350 and i354 */
   7274 		if ((status & WRX_ST_VP) != 0) {
   7275 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7276 		}
   7277 
   7278 		/* Set up checksum info for this packet. */
   7279 		if ((status & WRX_ST_IXSM) == 0) {
   7280 			if (status & WRX_ST_IPCS) {
   7281 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7282 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7283 				if (errors & WRX_ER_IPE)
   7284 					m->m_pkthdr.csum_flags |=
   7285 					    M_CSUM_IPv4_BAD;
   7286 			}
   7287 			if (status & WRX_ST_TCPCS) {
   7288 				/*
   7289 				 * Note: we don't know if this was TCP or UDP,
   7290 				 * so we just set both bits, and expect the
   7291 				 * upper layers to deal.
   7292 				 */
   7293 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7294 				m->m_pkthdr.csum_flags |=
   7295 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7296 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7297 				if (errors & WRX_ER_TCPE)
   7298 					m->m_pkthdr.csum_flags |=
   7299 					    M_CSUM_TCP_UDP_BAD;
   7300 			}
   7301 		}
   7302 
   7303 		ifp->if_ipackets++;
   7304 
   7305 		mutex_exit(rxq->rxq_lock);
   7306 
   7307 		/* Pass this up to any BPF listeners. */
   7308 		bpf_mtap(ifp, m);
   7309 
   7310 		/* Pass it on. */
   7311 		if_percpuq_enqueue(sc->sc_ipq, m);
   7312 
   7313 		mutex_enter(rxq->rxq_lock);
   7314 
   7315 		if (sc->sc_stopping)
   7316 			break;
   7317 	}
   7318 
   7319 	/* Update the receive pointer. */
   7320 	rxq->rxq_ptr = i;
   7321 	if (count != 0)
   7322 		rnd_add_uint32(&sc->rnd_source, count);
   7323 
   7324 	DPRINTF(WM_DEBUG_RX,
   7325 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7326 }
   7327 
   7328 /*
   7329  * wm_linkintr_gmii:
   7330  *
   7331  *	Helper; handle link interrupts for GMII.
   7332  */
   7333 static void
   7334 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7335 {
   7336 
   7337 	KASSERT(WM_CORE_LOCKED(sc));
   7338 
   7339 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7340 		__func__));
   7341 
   7342 	if (icr & ICR_LSC) {
   7343 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7344 
   7345 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7346 			wm_gig_downshift_workaround_ich8lan(sc);
   7347 
   7348 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7349 			device_xname(sc->sc_dev)));
   7350 		mii_pollstat(&sc->sc_mii);
   7351 		if (sc->sc_type == WM_T_82543) {
   7352 			int miistatus, active;
   7353 
   7354 			/*
   7355 			 * With 82543, we need to force speed and
   7356 			 * duplex on the MAC equal to what the PHY
   7357 			 * speed and duplex configuration is.
   7358 			 */
   7359 			miistatus = sc->sc_mii.mii_media_status;
   7360 
   7361 			if (miistatus & IFM_ACTIVE) {
   7362 				active = sc->sc_mii.mii_media_active;
   7363 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7364 				switch (IFM_SUBTYPE(active)) {
   7365 				case IFM_10_T:
   7366 					sc->sc_ctrl |= CTRL_SPEED_10;
   7367 					break;
   7368 				case IFM_100_TX:
   7369 					sc->sc_ctrl |= CTRL_SPEED_100;
   7370 					break;
   7371 				case IFM_1000_T:
   7372 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7373 					break;
   7374 				default:
   7375 					/*
   7376 					 * fiber?
    7377 					 * Should not enter here.
   7378 					 */
   7379 					printf("unknown media (%x)\n", active);
   7380 					break;
   7381 				}
   7382 				if (active & IFM_FDX)
   7383 					sc->sc_ctrl |= CTRL_FD;
   7384 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7385 			}
   7386 		} else if ((sc->sc_type == WM_T_ICH8)
   7387 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7388 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7389 		} else if (sc->sc_type == WM_T_PCH) {
   7390 			wm_k1_gig_workaround_hv(sc,
   7391 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7392 		}
   7393 
   7394 		if ((sc->sc_phytype == WMPHY_82578)
   7395 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7396 			== IFM_1000_T)) {
   7397 
   7398 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7399 				delay(200*1000); /* XXX too big */
   7400 
   7401 				/* Link stall fix for link up */
   7402 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7403 				    HV_MUX_DATA_CTRL,
   7404 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7405 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7406 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7407 				    HV_MUX_DATA_CTRL,
   7408 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7409 			}
   7410 		}
   7411 	} else if (icr & ICR_RXSEQ) {
   7412 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7413 			device_xname(sc->sc_dev)));
   7414 	}
   7415 }
   7416 
   7417 /*
   7418  * wm_linkintr_tbi:
   7419  *
   7420  *	Helper; handle link interrupts for TBI mode.
   7421  */
   7422 static void
   7423 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7424 {
   7425 	uint32_t status;
   7426 
   7427 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7428 		__func__));
   7429 
   7430 	status = CSR_READ(sc, WMREG_STATUS);
   7431 	if (icr & ICR_LSC) {
   7432 		if (status & STATUS_LU) {
   7433 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7434 			    device_xname(sc->sc_dev),
   7435 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7436 			/*
   7437 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7438 			 * so we should update sc->sc_ctrl
   7439 			 */
   7440 
   7441 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7442 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7443 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7444 			if (status & STATUS_FD)
   7445 				sc->sc_tctl |=
   7446 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7447 			else
   7448 				sc->sc_tctl |=
   7449 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7450 			if (sc->sc_ctrl & CTRL_TFCE)
   7451 				sc->sc_fcrtl |= FCRTL_XONE;
   7452 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7453 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7454 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7455 				      sc->sc_fcrtl);
   7456 			sc->sc_tbi_linkup = 1;
   7457 		} else {
   7458 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7459 			    device_xname(sc->sc_dev)));
   7460 			sc->sc_tbi_linkup = 0;
   7461 		}
   7462 		/* Update LED */
   7463 		wm_tbi_serdes_set_linkled(sc);
   7464 	} else if (icr & ICR_RXSEQ) {
   7465 		DPRINTF(WM_DEBUG_LINK,
   7466 		    ("%s: LINK: Receive sequence error\n",
   7467 		    device_xname(sc->sc_dev)));
   7468 	}
   7469 }
   7470 
   7471 /*
   7472  * wm_linkintr_serdes:
   7473  *
    7474  *	Helper; handle link interrupts for SERDES mode.
   7475  */
   7476 static void
   7477 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7478 {
   7479 	struct mii_data *mii = &sc->sc_mii;
   7480 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7481 	uint32_t pcs_adv, pcs_lpab, reg;
   7482 
   7483 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7484 		__func__));
   7485 
   7486 	if (icr & ICR_LSC) {
   7487 		/* Check PCS */
   7488 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7489 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7490 			mii->mii_media_status |= IFM_ACTIVE;
   7491 			sc->sc_tbi_linkup = 1;
   7492 		} else {
   7493 			mii->mii_media_status |= IFM_NONE;
   7494 			sc->sc_tbi_linkup = 0;
   7495 			wm_tbi_serdes_set_linkled(sc);
   7496 			return;
   7497 		}
   7498 		mii->mii_media_active |= IFM_1000_SX;
   7499 		if ((reg & PCS_LSTS_FDX) != 0)
   7500 			mii->mii_media_active |= IFM_FDX;
   7501 		else
   7502 			mii->mii_media_active |= IFM_HDX;
   7503 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7504 			/* Check flow */
   7505 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7506 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7507 				DPRINTF(WM_DEBUG_LINK,
   7508 				    ("XXX LINKOK but not ACOMP\n"));
   7509 				return;
   7510 			}
   7511 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7512 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7513 			DPRINTF(WM_DEBUG_LINK,
   7514 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
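         			/*
         			 * Resolve flow control as in IEEE 802.3 Annex
         			 * 28B: symmetric pause on both sides enables
         			 * both directions; otherwise a matching
         			 * asymmetric advertisement enables pause in one
         			 * direction only, per the three cases below.
         			 */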
   7515 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7516 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7517 				mii->mii_media_active |= IFM_FLOW
   7518 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7519 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7520 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7521 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7522 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7523 				mii->mii_media_active |= IFM_FLOW
   7524 				    | IFM_ETH_TXPAUSE;
   7525 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7526 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7527 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7528 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7529 				mii->mii_media_active |= IFM_FLOW
   7530 				    | IFM_ETH_RXPAUSE;
   7531 		}
   7532 		/* Update LED */
   7533 		wm_tbi_serdes_set_linkled(sc);
   7534 	} else {
   7535 		DPRINTF(WM_DEBUG_LINK,
   7536 		    ("%s: LINK: Receive sequence error\n",
   7537 		    device_xname(sc->sc_dev)));
   7538 	}
   7539 }
   7540 
   7541 /*
   7542  * wm_linkintr:
   7543  *
   7544  *	Helper; handle link interrupts.
   7545  */
   7546 static void
   7547 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7548 {
   7549 
   7550 	KASSERT(WM_CORE_LOCKED(sc));
   7551 
   7552 	if (sc->sc_flags & WM_F_HAS_MII)
   7553 		wm_linkintr_gmii(sc, icr);
   7554 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7555 	    && (sc->sc_type >= WM_T_82575))
   7556 		wm_linkintr_serdes(sc, icr);
   7557 	else
   7558 		wm_linkintr_tbi(sc, icr);
   7559 }
   7560 
   7561 /*
   7562  * wm_intr_legacy:
   7563  *
   7564  *	Interrupt service routine for INTx and MSI.
   7565  */
   7566 static int
   7567 wm_intr_legacy(void *arg)
   7568 {
   7569 	struct wm_softc *sc = arg;
   7570 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7571 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7572 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7573 	uint32_t icr, rndval = 0;
   7574 	int handled = 0;
   7575 
   7576 	DPRINTF(WM_DEBUG_TX,
   7577 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7578 	while (1 /* CONSTCOND */) {
   7579 		icr = CSR_READ(sc, WMREG_ICR);
   7580 		if ((icr & sc->sc_icr) == 0)
   7581 			break;
   7582 		if (rndval == 0)
   7583 			rndval = icr;
   7584 
   7585 		mutex_enter(rxq->rxq_lock);
   7586 
   7587 		if (sc->sc_stopping) {
   7588 			mutex_exit(rxq->rxq_lock);
   7589 			break;
   7590 		}
   7591 
   7592 		handled = 1;
   7593 
   7594 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7595 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7596 			DPRINTF(WM_DEBUG_RX,
   7597 			    ("%s: RX: got Rx intr 0x%08x\n",
   7598 			    device_xname(sc->sc_dev),
   7599 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7600 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7601 		}
   7602 #endif
   7603 		wm_rxeof(rxq);
   7604 
   7605 		mutex_exit(rxq->rxq_lock);
   7606 		mutex_enter(txq->txq_lock);
   7607 
   7608 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7609 		if (icr & ICR_TXDW) {
   7610 			DPRINTF(WM_DEBUG_TX,
   7611 			    ("%s: TX: got TXDW interrupt\n",
   7612 			    device_xname(sc->sc_dev)));
   7613 			WM_Q_EVCNT_INCR(txq, txdw);
   7614 		}
   7615 #endif
   7616 		wm_txeof(sc, txq);
   7617 
   7618 		mutex_exit(txq->txq_lock);
   7619 		WM_CORE_LOCK(sc);
   7620 
   7621 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7622 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7623 			wm_linkintr(sc, icr);
   7624 		}
   7625 
   7626 		WM_CORE_UNLOCK(sc);
   7627 
   7628 		if (icr & ICR_RXO) {
   7629 #if defined(WM_DEBUG)
   7630 			log(LOG_WARNING, "%s: Receive overrun\n",
   7631 			    device_xname(sc->sc_dev));
   7632 #endif /* defined(WM_DEBUG) */
   7633 		}
   7634 	}
   7635 
   7636 	rnd_add_uint32(&sc->rnd_source, rndval);
   7637 
   7638 	if (handled) {
   7639 		/* Try to get more packets going. */
   7640 		ifp->if_start(ifp);
   7641 	}
   7642 
   7643 	return handled;
   7644 }
   7645 
   7646 static int
   7647 wm_txrxintr_msix(void *arg)
   7648 {
   7649 	struct wm_queue *wmq = arg;
   7650 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7651 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7652 	struct wm_softc *sc = txq->txq_sc;
   7653 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7654 
   7655 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7656 
   7657 	DPRINTF(WM_DEBUG_TX,
   7658 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7659 
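         	/*
         	 * Mask this queue's interrupt while servicing it: the 82574
         	 * uses per-queue bits in IMC/IMS, the 82575 uses EIMC/EIMS
         	 * queue bits, and newer devices mask one bit per MSI-X vector.
         	 * The matching re-enable is at the end of this function.
         	 */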
   7660 	if (sc->sc_type == WM_T_82574)
   7661 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7662 	else if (sc->sc_type == WM_T_82575)
   7663 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7664 	else
   7665 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   7666 
   7667 	if (!sc->sc_stopping) {
   7668 		mutex_enter(txq->txq_lock);
   7669 
   7670 		WM_Q_EVCNT_INCR(txq, txdw);
   7671 		wm_txeof(sc, txq);
   7672 
   7673 		/* Try to get more packets going. */
   7674 		if (pcq_peek(txq->txq_interq) != NULL)
   7675 			wm_nq_transmit_locked(ifp, txq);
   7676 		/*
    7677 		 * Some upper-layer processing, e.g. ALTQ, still calls
    7678 		 * ifp->if_start().
   7679 		 */
   7680 		if (wmq->wmq_id == 0) {
   7681 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7682 				wm_nq_start_locked(ifp);
   7683 		}
   7684 		mutex_exit(txq->txq_lock);
   7685 	}
   7686 
   7687 	DPRINTF(WM_DEBUG_RX,
   7688 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7689 
   7690 	if (!sc->sc_stopping) {
   7691 		mutex_enter(rxq->rxq_lock);
   7692 		WM_Q_EVCNT_INCR(rxq, rxintr);
   7693 		wm_rxeof(rxq);
   7694 		mutex_exit(rxq->rxq_lock);
   7695 	}
   7696 
   7697 	if (sc->sc_type == WM_T_82574)
   7698 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7699 	else if (sc->sc_type == WM_T_82575)
   7700 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7701 	else
   7702 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7703 
   7704 	return 1;
   7705 }
   7706 
   7707 /*
   7708  * wm_linkintr_msix:
   7709  *
   7710  *	Interrupt service routine for link status change for MSI-X.
   7711  */
   7712 static int
   7713 wm_linkintr_msix(void *arg)
   7714 {
   7715 	struct wm_softc *sc = arg;
   7716 	uint32_t reg;
   7717 
   7718 	DPRINTF(WM_DEBUG_LINK,
   7719 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7720 
   7721 	reg = CSR_READ(sc, WMREG_ICR);
   7722 	WM_CORE_LOCK(sc);
   7723 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7724 		goto out;
   7725 
   7726 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7727 	wm_linkintr(sc, ICR_LSC);
   7728 
   7729 out:
   7730 	WM_CORE_UNLOCK(sc);
   7731 
   7732 	if (sc->sc_type == WM_T_82574)
   7733 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7734 	else if (sc->sc_type == WM_T_82575)
   7735 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7736 	else
   7737 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7738 
   7739 	return 1;
   7740 }
   7741 
   7742 /*
   7743  * Media related.
   7744  * GMII, SGMII, TBI (and SERDES)
   7745  */
   7746 
   7747 /* Common */
   7748 
   7749 /*
   7750  * wm_tbi_serdes_set_linkled:
   7751  *
   7752  *	Update the link LED on TBI and SERDES devices.
   7753  */
   7754 static void
   7755 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7756 {
   7757 
   7758 	if (sc->sc_tbi_linkup)
   7759 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7760 	else
   7761 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7762 
   7763 	/* 82540 or newer devices are active low */
   7764 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7765 
   7766 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7767 }
   7768 
   7769 /* GMII related */
   7770 
   7771 /*
   7772  * wm_gmii_reset:
   7773  *
   7774  *	Reset the PHY.
   7775  */
   7776 static void
   7777 wm_gmii_reset(struct wm_softc *sc)
   7778 {
   7779 	uint32_t reg;
   7780 	int rv;
   7781 
   7782 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7783 		device_xname(sc->sc_dev), __func__));
   7784 
   7785 	/* Get phy semaphore */
   7786 	switch (sc->sc_type) {
   7787 	case WM_T_82571:
   7788 	case WM_T_82572:
   7789 	case WM_T_82573:
   7790 	case WM_T_82574:
   7791 	case WM_T_82583:
   7792 		 /* XXX should get sw semaphore, too */
   7793 		rv = wm_get_swsm_semaphore(sc);
   7794 		break;
   7795 	case WM_T_82575:
   7796 	case WM_T_82576:
   7797 	case WM_T_82580:
   7798 	case WM_T_I350:
   7799 	case WM_T_I354:
   7800 	case WM_T_I210:
   7801 	case WM_T_I211:
   7802 	case WM_T_80003:
   7803 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7804 		break;
   7805 	case WM_T_ICH8:
   7806 	case WM_T_ICH9:
   7807 	case WM_T_ICH10:
   7808 	case WM_T_PCH:
   7809 	case WM_T_PCH2:
   7810 	case WM_T_PCH_LPT:
   7811 	case WM_T_PCH_SPT:
   7812 		rv = wm_get_swfwhw_semaphore(sc);
   7813 		break;
   7814 	default:
    7815 		/* nothing to do */
   7816 		rv = 0;
   7817 		break;
   7818 	}
   7819 	if (rv != 0) {
   7820 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7821 		    __func__);
   7822 		return;
   7823 	}
   7824 
   7825 	switch (sc->sc_type) {
   7826 	case WM_T_82542_2_0:
   7827 	case WM_T_82542_2_1:
   7828 		/* null */
   7829 		break;
   7830 	case WM_T_82543:
   7831 		/*
   7832 		 * With 82543, we need to force speed and duplex on the MAC
   7833 		 * equal to what the PHY speed and duplex configuration is.
   7834 		 * In addition, we need to perform a hardware reset on the PHY
   7835 		 * to take it out of reset.
   7836 		 */
   7837 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7838 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7839 
   7840 		/* The PHY reset pin is active-low. */
   7841 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7842 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7843 		    CTRL_EXT_SWDPIN(4));
   7844 		reg |= CTRL_EXT_SWDPIO(4);
   7845 
   7846 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7847 		CSR_WRITE_FLUSH(sc);
   7848 		delay(10*1000);
   7849 
   7850 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7851 		CSR_WRITE_FLUSH(sc);
   7852 		delay(150);
   7853 #if 0
   7854 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7855 #endif
   7856 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7857 		break;
   7858 	case WM_T_82544:	/* reset 10000us */
   7859 	case WM_T_82540:
   7860 	case WM_T_82545:
   7861 	case WM_T_82545_3:
   7862 	case WM_T_82546:
   7863 	case WM_T_82546_3:
   7864 	case WM_T_82541:
   7865 	case WM_T_82541_2:
   7866 	case WM_T_82547:
   7867 	case WM_T_82547_2:
   7868 	case WM_T_82571:	/* reset 100us */
   7869 	case WM_T_82572:
   7870 	case WM_T_82573:
   7871 	case WM_T_82574:
   7872 	case WM_T_82575:
   7873 	case WM_T_82576:
   7874 	case WM_T_82580:
   7875 	case WM_T_I350:
   7876 	case WM_T_I354:
   7877 	case WM_T_I210:
   7878 	case WM_T_I211:
   7879 	case WM_T_82583:
   7880 	case WM_T_80003:
   7881 		/* generic reset */
   7882 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7883 		CSR_WRITE_FLUSH(sc);
   7884 		delay(20000);
   7885 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7886 		CSR_WRITE_FLUSH(sc);
   7887 		delay(20000);
   7888 
   7889 		if ((sc->sc_type == WM_T_82541)
   7890 		    || (sc->sc_type == WM_T_82541_2)
   7891 		    || (sc->sc_type == WM_T_82547)
   7892 		    || (sc->sc_type == WM_T_82547_2)) {
    7893 			/* Workarounds for IGP are done in igp_reset() */
   7894 			/* XXX add code to set LED after phy reset */
   7895 		}
   7896 		break;
   7897 	case WM_T_ICH8:
   7898 	case WM_T_ICH9:
   7899 	case WM_T_ICH10:
   7900 	case WM_T_PCH:
   7901 	case WM_T_PCH2:
   7902 	case WM_T_PCH_LPT:
   7903 	case WM_T_PCH_SPT:
   7904 		/* generic reset */
   7905 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7906 		CSR_WRITE_FLUSH(sc);
   7907 		delay(100);
   7908 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7909 		CSR_WRITE_FLUSH(sc);
   7910 		delay(150);
   7911 		break;
   7912 	default:
   7913 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7914 		    __func__);
   7915 		break;
   7916 	}
   7917 
   7918 	/* release PHY semaphore */
   7919 	switch (sc->sc_type) {
   7920 	case WM_T_82571:
   7921 	case WM_T_82572:
   7922 	case WM_T_82573:
   7923 	case WM_T_82574:
   7924 	case WM_T_82583:
   7925 		 /* XXX should put sw semaphore, too */
   7926 		wm_put_swsm_semaphore(sc);
   7927 		break;
   7928 	case WM_T_82575:
   7929 	case WM_T_82576:
   7930 	case WM_T_82580:
   7931 	case WM_T_I350:
   7932 	case WM_T_I354:
   7933 	case WM_T_I210:
   7934 	case WM_T_I211:
   7935 	case WM_T_80003:
   7936 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7937 		break;
   7938 	case WM_T_ICH8:
   7939 	case WM_T_ICH9:
   7940 	case WM_T_ICH10:
   7941 	case WM_T_PCH:
   7942 	case WM_T_PCH2:
   7943 	case WM_T_PCH_LPT:
   7944 	case WM_T_PCH_SPT:
   7945 		wm_put_swfwhw_semaphore(sc);
   7946 		break;
   7947 	default:
   7948 		/* nothing to do */
   7949 		rv = 0;
   7950 		break;
   7951 	}
   7952 
   7953 	/* get_cfg_done */
   7954 	wm_get_cfg_done(sc);
   7955 
   7956 	/* extra setup */
   7957 	switch (sc->sc_type) {
   7958 	case WM_T_82542_2_0:
   7959 	case WM_T_82542_2_1:
   7960 	case WM_T_82543:
   7961 	case WM_T_82544:
   7962 	case WM_T_82540:
   7963 	case WM_T_82545:
   7964 	case WM_T_82545_3:
   7965 	case WM_T_82546:
   7966 	case WM_T_82546_3:
   7967 	case WM_T_82541_2:
   7968 	case WM_T_82547_2:
   7969 	case WM_T_82571:
   7970 	case WM_T_82572:
   7971 	case WM_T_82573:
   7972 	case WM_T_82575:
   7973 	case WM_T_82576:
   7974 	case WM_T_82580:
   7975 	case WM_T_I350:
   7976 	case WM_T_I354:
   7977 	case WM_T_I210:
   7978 	case WM_T_I211:
   7979 	case WM_T_80003:
   7980 		/* null */
   7981 		break;
   7982 	case WM_T_82574:
   7983 	case WM_T_82583:
   7984 		wm_lplu_d0_disable(sc);
   7985 		break;
   7986 	case WM_T_82541:
   7987 	case WM_T_82547:
    7988 		/* XXX Configure the activity LED after PHY reset */
   7989 		break;
   7990 	case WM_T_ICH8:
   7991 	case WM_T_ICH9:
   7992 	case WM_T_ICH10:
   7993 	case WM_T_PCH:
   7994 	case WM_T_PCH2:
   7995 	case WM_T_PCH_LPT:
   7996 	case WM_T_PCH_SPT:
    7997 		/* Allow time for h/w to get to a quiescent state after reset */
   7998 		delay(10*1000);
   7999 
   8000 		if (sc->sc_type == WM_T_PCH)
   8001 			wm_hv_phy_workaround_ich8lan(sc);
   8002 
   8003 		if (sc->sc_type == WM_T_PCH2)
   8004 			wm_lv_phy_workaround_ich8lan(sc);
   8005 
   8006 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   8007 			/*
   8008 			 * dummy read to clear the phy wakeup bit after lcd
   8009 			 * reset
   8010 			 */
   8011 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   8012 		}
   8013 
   8014 		/*
   8015 		 * XXX Configure the LCD with the extended configuration region
   8016 		 * in NVM
   8017 		 */
   8018 
   8019 		/* Disable D0 LPLU. */
   8020 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8021 			wm_lplu_d0_disable_pch(sc);
   8022 		else
   8023 			wm_lplu_d0_disable(sc);	/* ICH* */
   8024 		break;
   8025 	default:
   8026 		panic("%s: unknown type\n", __func__);
   8027 		break;
   8028 	}
   8029 }
   8030 
   8031 /*
   8032  * wm_get_phy_id_82575:
   8033  *
   8034  * Return PHY ID. Return -1 if it failed.
   8035  */
   8036 static int
   8037 wm_get_phy_id_82575(struct wm_softc *sc)
   8038 {
   8039 	uint32_t reg;
   8040 	int phyid = -1;
   8041 
   8042 	/* XXX */
   8043 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8044 		return -1;
   8045 
   8046 	if (wm_sgmii_uses_mdio(sc)) {
   8047 		switch (sc->sc_type) {
   8048 		case WM_T_82575:
   8049 		case WM_T_82576:
   8050 			reg = CSR_READ(sc, WMREG_MDIC);
   8051 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8052 			break;
   8053 		case WM_T_82580:
   8054 		case WM_T_I350:
   8055 		case WM_T_I354:
   8056 		case WM_T_I210:
   8057 		case WM_T_I211:
   8058 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8059 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8060 			break;
   8061 		default:
   8062 			return -1;
   8063 		}
   8064 	}
   8065 
   8066 	return phyid;
   8067 }
   8068 
   8070 /*
   8071  * wm_gmii_mediainit:
   8072  *
   8073  *	Initialize media for use on 1000BASE-T devices.
   8074  */
   8075 static void
   8076 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8077 {
   8078 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8079 	struct mii_data *mii = &sc->sc_mii;
   8080 	uint32_t reg;
   8081 
   8082 	/* We have GMII. */
   8083 	sc->sc_flags |= WM_F_HAS_MII;
   8084 
   8085 	if (sc->sc_type == WM_T_80003)
   8086 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8087 	else
   8088 		sc->sc_tipg = TIPG_1000T_DFLT;
   8089 
   8090 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8091 	if ((sc->sc_type == WM_T_82580)
   8092 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8093 	    || (sc->sc_type == WM_T_I211)) {
   8094 		reg = CSR_READ(sc, WMREG_PHPM);
   8095 		reg &= ~PHPM_GO_LINK_D;
   8096 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8097 	}
   8098 
   8099 	/*
   8100 	 * Let the chip set speed/duplex on its own based on
   8101 	 * signals from the PHY.
   8102 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8103 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8104 	 */
   8105 	sc->sc_ctrl |= CTRL_SLU;
   8106 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8107 
   8108 	/* Initialize our media structures and probe the GMII. */
   8109 	mii->mii_ifp = ifp;
   8110 
   8111 	/*
   8112 	 * Determine the PHY access method.
   8113 	 *
   8114 	 *  For SGMII, use the SGMII-specific method.
   8115 	 *
   8116 	 *  For some devices, we can determine the PHY access method
   8117 	 * from sc_type.
   8118 	 *
   8119 	 *  For ICH and PCH variants, it's difficult to determine the PHY
   8120 	 * access method from sc_type alone, so use the PCI product ID for
   8121 	 * some devices.
   8122 	 *  For other ICH8 variants, try igp's method first; if the PHY
   8123 	 * can't be detected that way, fall back to bm's method.
   8124 	 */
   8125 	switch (prodid) {
   8126 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8127 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8128 		/* 82577 */
   8129 		sc->sc_phytype = WMPHY_82577;
   8130 		break;
   8131 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8132 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8133 		/* 82578 */
   8134 		sc->sc_phytype = WMPHY_82578;
   8135 		break;
   8136 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8137 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8138 		/* 82579 */
   8139 		sc->sc_phytype = WMPHY_82579;
   8140 		break;
   8141 	case PCI_PRODUCT_INTEL_82801I_BM:
   8142 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8143 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8144 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8145 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8146 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8147 		/* 82567 */
   8148 		sc->sc_phytype = WMPHY_BM;
   8149 		mii->mii_readreg = wm_gmii_bm_readreg;
   8150 		mii->mii_writereg = wm_gmii_bm_writereg;
   8151 		break;
   8152 	default:
   8153 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8154 		    && !wm_sgmii_uses_mdio(sc)){
   8155 			/* SGMII */
   8156 			mii->mii_readreg = wm_sgmii_readreg;
   8157 			mii->mii_writereg = wm_sgmii_writereg;
   8158 		} else if (sc->sc_type >= WM_T_80003) {
   8159 			/* 80003 */
   8160 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8161 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8162 		} else if (sc->sc_type >= WM_T_I210) {
   8163 			/* I210 and I211 */
   8164 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8165 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8166 		} else if (sc->sc_type >= WM_T_82580) {
   8167 			/* 82580, I350 and I354 */
   8168 			sc->sc_phytype = WMPHY_82580;
   8169 			mii->mii_readreg = wm_gmii_82580_readreg;
   8170 			mii->mii_writereg = wm_gmii_82580_writereg;
   8171 		} else if (sc->sc_type >= WM_T_82544) {
   8172 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8173 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8174 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8175 		} else {
   8176 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8177 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8178 		}
   8179 		break;
   8180 	}
   8181 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8182 		/* All PCH* use _hv_ */
   8183 		mii->mii_readreg = wm_gmii_hv_readreg;
   8184 		mii->mii_writereg = wm_gmii_hv_writereg;
   8185 	}
   8186 	mii->mii_statchg = wm_gmii_statchg;
   8187 
   8188 	wm_gmii_reset(sc);
   8189 
   8190 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8191 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8192 	    wm_gmii_mediastatus);
   8193 
   8194 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8195 	    || (sc->sc_type == WM_T_82580)
   8196 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8197 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8198 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8199 			/* Attach only one port */
   8200 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8201 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8202 		} else {
   8203 			int i, id;
   8204 			uint32_t ctrl_ext;
   8205 
   8206 			id = wm_get_phy_id_82575(sc);
   8207 			if (id != -1) {
   8208 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8209 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8210 			}
   8211 			if ((id == -1)
   8212 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8213 				/* Power on sgmii phy if it is disabled */
   8214 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8215 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8216 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8217 				CSR_WRITE_FLUSH(sc);
   8218 				delay(300*1000); /* XXX too long */
   8219 
   8220 				/* Try PHY addresses 1 to 7 */
   8221 				for (i = 1; i < 8; i++)
   8222 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8223 					    0xffffffff, i, MII_OFFSET_ANY,
   8224 					    MIIF_DOPAUSE);
   8225 
   8226 				/* restore previous sfp cage power state */
   8227 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8228 			}
   8229 		}
   8230 	} else {
   8231 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8232 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8233 	}
   8234 
   8235 	/*
   8236 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8237 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8238 	 */
   8239 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8240 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8241 		wm_set_mdio_slow_mode_hv(sc);
   8242 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8243 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8244 	}
   8245 
   8246 	/*
   8247 	 * (For ICH8 variants)
   8248 	 * If PHY detection failed, use BM's r/w function and retry.
   8249 	 */
   8250 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8251 		/* if failed, retry with *_bm_* */
   8252 		mii->mii_readreg = wm_gmii_bm_readreg;
   8253 		mii->mii_writereg = wm_gmii_bm_writereg;
   8254 
   8255 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8256 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8257 	}
   8258 
   8259 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8260 		/* No PHY was found */
   8261 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8262 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8263 		sc->sc_phytype = WMPHY_NONE;
   8264 	} else {
   8265 		/*
   8266 		 * PHY Found!
   8267 		 * Check PHY type.
   8268 		 */
   8269 		uint32_t model;
   8270 		struct mii_softc *child;
   8271 
   8272 		child = LIST_FIRST(&mii->mii_phys);
   8273 		model = child->mii_mpd_model;
   8274 		if (model == MII_MODEL_yyINTEL_I82566)
   8275 			sc->sc_phytype = WMPHY_IGP_3;
   8276 
   8277 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8278 	}
   8279 }
   8280 
   8281 /*
   8282  * wm_gmii_mediachange:	[ifmedia interface function]
   8283  *
   8284  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8285  */
   8286 static int
   8287 wm_gmii_mediachange(struct ifnet *ifp)
   8288 {
   8289 	struct wm_softc *sc = ifp->if_softc;
   8290 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8291 	int rc;
   8292 
   8293 	if ((ifp->if_flags & IFF_UP) == 0)
   8294 		return 0;
   8295 
   8296 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8297 	sc->sc_ctrl |= CTRL_SLU;
   8298 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8299 	    || (sc->sc_type > WM_T_82543)) {
   8300 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8301 	} else {
   8302 		sc->sc_ctrl &= ~CTRL_ASDE;
   8303 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8304 		if (ife->ifm_media & IFM_FDX)
   8305 			sc->sc_ctrl |= CTRL_FD;
   8306 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8307 		case IFM_10_T:
   8308 			sc->sc_ctrl |= CTRL_SPEED_10;
   8309 			break;
   8310 		case IFM_100_TX:
   8311 			sc->sc_ctrl |= CTRL_SPEED_100;
   8312 			break;
   8313 		case IFM_1000_T:
   8314 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8315 			break;
   8316 		default:
   8317 			panic("wm_gmii_mediachange: bad media 0x%x",
   8318 			    ife->ifm_media);
   8319 		}
   8320 	}
   8321 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8322 	if (sc->sc_type <= WM_T_82543)
   8323 		wm_gmii_reset(sc);
   8324 
   8325 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8326 		return 0;
   8327 	return rc;
   8328 }
   8329 
   8330 /*
   8331  * wm_gmii_mediastatus:	[ifmedia interface function]
   8332  *
   8333  *	Get the current interface media status on a 1000BASE-T device.
   8334  */
   8335 static void
   8336 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8337 {
   8338 	struct wm_softc *sc = ifp->if_softc;
   8339 
   8340 	ether_mediastatus(ifp, ifmr);
   8341 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8342 	    | sc->sc_flowflags;
   8343 }
   8344 
   8345 #define	MDI_IO		CTRL_SWDPIN(2)
   8346 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8347 #define	MDI_CLK		CTRL_SWDPIN(3)
   8348 
   8349 static void
   8350 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8351 {
   8352 	uint32_t i, v;
   8353 
   8354 	v = CSR_READ(sc, WMREG_CTRL);
   8355 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8356 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8357 
   8358 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8359 		if (data & i)
   8360 			v |= MDI_IO;
   8361 		else
   8362 			v &= ~MDI_IO;
   8363 		CSR_WRITE(sc, WMREG_CTRL, v);
   8364 		CSR_WRITE_FLUSH(sc);
   8365 		delay(10);
   8366 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8367 		CSR_WRITE_FLUSH(sc);
   8368 		delay(10);
   8369 		CSR_WRITE(sc, WMREG_CTRL, v);
   8370 		CSR_WRITE_FLUSH(sc);
   8371 		delay(10);
   8372 	}
   8373 }
   8374 
   8375 static uint32_t
   8376 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8377 {
   8378 	uint32_t v, i, data = 0;
   8379 
   8380 	v = CSR_READ(sc, WMREG_CTRL);
   8381 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8382 	v |= CTRL_SWDPIO(3);
   8383 
   8384 	CSR_WRITE(sc, WMREG_CTRL, v);
   8385 	CSR_WRITE_FLUSH(sc);
   8386 	delay(10);
   8387 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8388 	CSR_WRITE_FLUSH(sc);
   8389 	delay(10);
   8390 	CSR_WRITE(sc, WMREG_CTRL, v);
   8391 	CSR_WRITE_FLUSH(sc);
   8392 	delay(10);
   8393 
   8394 	for (i = 0; i < 16; i++) {
   8395 		data <<= 1;
   8396 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8397 		CSR_WRITE_FLUSH(sc);
   8398 		delay(10);
   8399 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8400 			data |= 1;
   8401 		CSR_WRITE(sc, WMREG_CTRL, v);
   8402 		CSR_WRITE_FLUSH(sc);
   8403 		delay(10);
   8404 	}
   8405 
   8406 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8407 	CSR_WRITE_FLUSH(sc);
   8408 	delay(10);
   8409 	CSR_WRITE(sc, WMREG_CTRL, v);
   8410 	CSR_WRITE_FLUSH(sc);
   8411 	delay(10);
   8412 
   8413 	return data;
   8414 }
   8415 
   8416 #undef MDI_IO
   8417 #undef MDI_DIR
   8418 #undef MDI_CLK
   8419 
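        /*
         * The two helpers above bit-bang IEEE 802.3 clause 22 management
         * frames on the software-controlled MDI pins.  As a sketch (bit
         * positions taken from the shifts in the callers below), after a
         * 32-bit preamble of ones:
         *
         *	read:  a 14-bit command, then 16 data bits clocked back in:
         *		[13:12] MII_COMMAND_START  [11:10] MII_COMMAND_READ
         *		[9:5]   PHY address        [4:0]   register address
         *	write: a single 32-bit frame:
         *		[31:30] MII_COMMAND_START  [29:28] MII_COMMAND_WRITE
         *		[27:23] PHY address        [22:18] register address
         *		[17:16] MII_COMMAND_ACK    [15:0]  data
         */
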
   8420 /*
   8421  * wm_gmii_i82543_readreg:	[mii interface function]
   8422  *
   8423  *	Read a PHY register on the GMII (i82543 version).
   8424  */
   8425 static int
   8426 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8427 {
   8428 	struct wm_softc *sc = device_private(self);
   8429 	int rv;
   8430 
   8431 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8432 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8433 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8434 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8435 
   8436 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8437 	    device_xname(sc->sc_dev), phy, reg, rv));
   8438 
   8439 	return rv;
   8440 }
   8441 
   8442 /*
   8443  * wm_gmii_i82543_writereg:	[mii interface function]
   8444  *
   8445  *	Write a PHY register on the GMII (i82543 version).
   8446  */
   8447 static void
   8448 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8449 {
   8450 	struct wm_softc *sc = device_private(self);
   8451 
   8452 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8453 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8454 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8455 	    (MII_COMMAND_START << 30), 32);
   8456 }
   8457 
   8458 /*
   8459  * wm_gmii_i82544_readreg:	[mii interface function]
   8460  *
   8461  *	Read a PHY register on the GMII.
   8462  */
   8463 static int
   8464 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8465 {
   8466 	struct wm_softc *sc = device_private(self);
   8467 	uint32_t mdic = 0;
   8468 	int i, rv;
   8469 
   8470 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8471 	    MDIC_REGADD(reg));
   8472 
   8473 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8474 		mdic = CSR_READ(sc, WMREG_MDIC);
   8475 		if (mdic & MDIC_READY)
   8476 			break;
   8477 		delay(50);
   8478 	}
   8479 
   8480 	if ((mdic & MDIC_READY) == 0) {
   8481 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8482 		    device_xname(sc->sc_dev), phy, reg);
   8483 		rv = 0;
   8484 	} else if (mdic & MDIC_E) {
   8485 #if 0 /* This is normal if no PHY is present. */
   8486 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8487 		    device_xname(sc->sc_dev), phy, reg);
   8488 #endif
   8489 		rv = 0;
   8490 	} else {
   8491 		rv = MDIC_DATA(mdic);
   8492 		if (rv == 0xffff)
   8493 			rv = 0;
   8494 	}
   8495 
   8496 	return rv;
   8497 }
   8498 
   8499 /*
   8500  * wm_gmii_i82544_writereg:	[mii interface function]
   8501  *
   8502  *	Write a PHY register on the GMII.
   8503  */
   8504 static void
   8505 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8506 {
   8507 	struct wm_softc *sc = device_private(self);
   8508 	uint32_t mdic = 0;
   8509 	int i;
   8510 
   8511 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8512 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8513 
   8514 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8515 		mdic = CSR_READ(sc, WMREG_MDIC);
   8516 		if (mdic & MDIC_READY)
   8517 			break;
   8518 		delay(50);
   8519 	}
   8520 
   8521 	if ((mdic & MDIC_READY) == 0)
   8522 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8523 		    device_xname(sc->sc_dev), phy, reg);
   8524 	else if (mdic & MDIC_E)
   8525 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8526 		    device_xname(sc->sc_dev), phy, reg);
   8527 }
   8528 
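        /*
         * The two MDIC helpers above are the building blocks for most of
         * the paged PHY access methods that follow: the upper bits of the
         * register number select a page through a PHY-specific page-select
         * register, and the masked low bits are then accessed directly.
         * A minimal sketch of the pattern (PAGE_SELECT_REG and OFFSET_MASK
         * are stand-ins for the per-PHY definitions):
         *
         *	wm_gmii_i82544_writereg(self, phy, PAGE_SELECT_REG, page);
         *	val = wm_gmii_i82544_readreg(self, phy, reg & OFFSET_MASK);
         */
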
   8529 /*
   8530  * wm_gmii_i80003_readreg:	[mii interface function]
   8531  *
   8532  *	Read a PHY register on the Kumeran bus.
   8533  * This could be handled by the PHY layer if we didn't have to lock the
   8534  * resource ...
   8535  */
   8536 static int
   8537 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8538 {
   8539 	struct wm_softc *sc = device_private(self);
   8540 	int sem;
   8541 	int rv;
   8542 
   8543 	if (phy != 1) /* only one PHY on kumeran bus */
   8544 		return 0;
   8545 
   8546 	sem = swfwphysem[sc->sc_funcid];
   8547 	if (wm_get_swfw_semaphore(sc, sem)) {
   8548 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8549 		    __func__);
   8550 		return 0;
   8551 	}
   8552 
   8553 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8554 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8555 		    reg >> GG82563_PAGE_SHIFT);
   8556 	} else {
   8557 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8558 		    reg >> GG82563_PAGE_SHIFT);
   8559 	}
   8560 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8561 	delay(200);
   8562 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8563 	delay(200);
   8564 
   8565 	wm_put_swfw_semaphore(sc, sem);
   8566 	return rv;
   8567 }
   8568 
   8569 /*
   8570  * wm_gmii_i80003_writereg:	[mii interface function]
   8571  *
   8572  *	Write a PHY register on the Kumeran bus.
   8573  * This could be handled by the PHY layer if we didn't have to lock the
   8574  * resource ...
   8575  */
   8576 static void
   8577 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8578 {
   8579 	struct wm_softc *sc = device_private(self);
   8580 	int sem;
   8581 
   8582 	if (phy != 1) /* only one PHY on kumeran bus */
   8583 		return;
   8584 
   8585 	sem = swfwphysem[sc->sc_funcid];
   8586 	if (wm_get_swfw_semaphore(sc, sem)) {
   8587 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8588 		    __func__);
   8589 		return;
   8590 	}
   8591 
   8592 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8593 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8594 		    reg >> GG82563_PAGE_SHIFT);
   8595 	} else {
   8596 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8597 		    reg >> GG82563_PAGE_SHIFT);
   8598 	}
   8599 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8600 	delay(200);
   8601 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8602 	delay(200);
   8603 
   8604 	wm_put_swfw_semaphore(sc, sem);
   8605 }
   8606 
   8607 /*
   8608  * wm_gmii_bm_readreg:	[mii interface function]
   8609  *
   8610  *	Read a PHY register on the BM PHY (82567 and ICH8/9/10 variants).
   8611  * This could be handled by the PHY layer if we didn't have to lock the
   8612  * resource ...
   8613  */
   8614 static int
   8615 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8616 {
   8617 	struct wm_softc *sc = device_private(self);
   8618 	int sem;
   8619 	int rv;
   8620 
   8621 	sem = swfwphysem[sc->sc_funcid];
   8622 	if (wm_get_swfw_semaphore(sc, sem)) {
   8623 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8624 		    __func__);
   8625 		return 0;
   8626 	}
   8627 
   8628 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8629 		if (phy == 1)
   8630 			wm_gmii_i82544_writereg(self, phy,
   8631 			    MII_IGPHY_PAGE_SELECT, reg);
   8632 		else
   8633 			wm_gmii_i82544_writereg(self, phy,
   8634 			    GG82563_PHY_PAGE_SELECT,
   8635 			    reg >> GG82563_PAGE_SHIFT);
   8636 	}
   8637 
   8638 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8639 	wm_put_swfw_semaphore(sc, sem);
   8640 	return rv;
   8641 }
   8642 
   8643 /*
   8644  * wm_gmii_bm_writereg:	[mii interface function]
   8645  *
   8646  *	Write a PHY register on the BM PHY (82567 and ICH8/9/10 variants).
   8647  * This could be handled by the PHY layer if we didn't have to lock the
   8648  * resource ...
   8649  */
   8650 static void
   8651 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8652 {
   8653 	struct wm_softc *sc = device_private(self);
   8654 	int sem;
   8655 
   8656 	sem = swfwphysem[sc->sc_funcid];
   8657 	if (wm_get_swfw_semaphore(sc, sem)) {
   8658 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8659 		    __func__);
   8660 		return;
   8661 	}
   8662 
   8663 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8664 		if (phy == 1)
   8665 			wm_gmii_i82544_writereg(self, phy,
   8666 			    MII_IGPHY_PAGE_SELECT, reg);
   8667 		else
   8668 			wm_gmii_i82544_writereg(self, phy,
   8669 			    GG82563_PHY_PAGE_SELECT,
   8670 			    reg >> GG82563_PAGE_SHIFT);
   8671 	}
   8672 
   8673 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8674 	wm_put_swfw_semaphore(sc, sem);
   8675 }
   8676 
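        /*
         * wm_access_phy_wakeup_reg_bm:
         *
         *	Read or write a BM PHY wakeup register.  These registers live
         * on page 800 (BM_WUC_PAGE) and are reached indirectly: page 769
         * (BM_WUC_ENABLE_PAGE) is selected first to set the enable bit, the
         * register number is written to the address opcode register on page
         * 800, and the data opcode register carries the actual transfer.
         * 'rd' selects a read (nonzero) or a write.
         */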
   8677 static void
   8678 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8679 {
   8680 	struct wm_softc *sc = device_private(self);
   8681 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8682 	uint16_t wuce;
   8683 
   8684 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8685 	if (sc->sc_type == WM_T_PCH) {
   8686 		/* XXX the e1000 driver does nothing here... why? */
   8687 	}
   8688 
   8689 	/* Set page 769 */
   8690 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8691 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8692 
   8693 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8694 
   8695 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8696 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8697 	    wuce | BM_WUC_ENABLE_BIT);
   8698 
   8699 	/* Select page 800 */
   8700 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8701 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8702 
   8703 	/* Write page 800 */
   8704 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8705 
   8706 	if (rd)
   8707 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8708 	else
   8709 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8710 
   8711 	/* Set page 769 */
   8712 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8713 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8714 
   8715 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8716 }
   8717 
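        /*
         * In the HV (PCH) accessors below, 'reg' is a combined value:
         * BM_PHY_REG_PAGE() extracts the page and BM_PHY_REG_NUM() the
         * register number within that page.  A sketch of a read through
         * this scheme, as the dummy BM_WUC read after LCD reset earlier in
         * this file does it:
         *
         *	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
         */
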
   8718 /*
   8719  * wm_gmii_hv_readreg:	[mii interface function]
   8720  *
   8721  *	Read a PHY register on the HV PHY (PCH variants).
   8722  * This could be handled by the PHY layer if we didn't have to lock the
   8723  * resource ...
   8724  */
   8725 static int
   8726 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8727 {
   8728 	struct wm_softc *sc = device_private(self);
   8729 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8730 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8731 	uint16_t val;
   8732 	int rv;
   8733 
   8734 	if (wm_get_swfwhw_semaphore(sc)) {
   8735 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8736 		    __func__);
   8737 		return 0;
   8738 	}
   8739 
   8740 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8741 	if (sc->sc_phytype == WMPHY_82577) {
   8742 		/* XXX must write */
   8743 	}
   8744 
   8745 	/* Page 800 works differently than the rest so it has its own func */
   8746 	if (page == BM_WUC_PAGE) {
   8747 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
        		wm_put_swfwhw_semaphore(sc);
   8748 		return val;
   8749 	}
   8750 
   8751 	/*
   8752 	 * Pages lower than 768 work differently than the rest, so they
   8753 	 * need their own access function
   8754 	 */
   8755 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8756 		printf("gmii_hv_readreg!!!\n");
        		wm_put_swfwhw_semaphore(sc);
   8757 		return 0;
   8758 	}
   8759 
   8760 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8761 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8762 		    page << BME1000_PAGE_SHIFT);
   8763 	}
   8764 
   8765 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8766 	wm_put_swfwhw_semaphore(sc);
   8767 	return rv;
   8768 }
   8769 
   8770 /*
   8771  * wm_gmii_hv_writereg:	[mii interface function]
   8772  *
   8773  *	Write a PHY register on the HV PHY (PCH variants).
   8774  * This could be handled by the PHY layer if we didn't have to lock the
   8775  * resource ...
   8776  */
   8777 static void
   8778 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8779 {
   8780 	struct wm_softc *sc = device_private(self);
   8781 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8782 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8783 
   8784 	if (wm_get_swfwhw_semaphore(sc)) {
   8785 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8786 		    __func__);
   8787 		return;
   8788 	}
   8789 
   8790 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8791 
   8792 	/* Page 800 works differently than the rest so it has its own func */
   8793 	if (page == BM_WUC_PAGE) {
   8794 		uint16_t tmp;
   8795 
   8796 		tmp = val;
   8797 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
        		wm_put_swfwhw_semaphore(sc);
   8798 		return;
   8799 	}
   8800 
   8801 	/*
   8802 	 * Pages lower than 768 work differently than the rest, so they
   8803 	 * need their own access function
   8804 	 */
   8805 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8806 		printf("gmii_hv_writereg!!!\n");
        		wm_put_swfwhw_semaphore(sc);
   8807 		return;
   8808 	}
   8809 
   8810 	/*
   8811 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8812 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8813 	 */
   8814 
   8815 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8816 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8817 		    page << BME1000_PAGE_SHIFT);
   8818 	}
   8819 
   8820 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8821 	wm_put_swfwhw_semaphore(sc);
   8822 }
   8823 
   8824 /*
   8825  * wm_gmii_82580_readreg:	[mii interface function]
   8826  *
   8827  *	Read a PHY register on the 82580 and I350.
   8828  * This could be handled by the PHY layer if we didn't have to lock the
   8829  * resource ...
   8830  */
   8831 static int
   8832 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8833 {
   8834 	struct wm_softc *sc = device_private(self);
   8835 	int sem;
   8836 	int rv;
   8837 
   8838 	sem = swfwphysem[sc->sc_funcid];
   8839 	if (wm_get_swfw_semaphore(sc, sem)) {
   8840 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8841 		    __func__);
   8842 		return 0;
   8843 	}
   8844 
   8845 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8846 
   8847 	wm_put_swfw_semaphore(sc, sem);
   8848 	return rv;
   8849 }
   8850 
   8851 /*
   8852  * wm_gmii_82580_writereg:	[mii interface function]
   8853  *
   8854  *	Write a PHY register on the 82580 and I350.
   8855  * This could be handled by the PHY layer if we didn't have to lock the
   8856  * resource ...
   8857  */
   8858 static void
   8859 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8860 {
   8861 	struct wm_softc *sc = device_private(self);
   8862 	int sem;
   8863 
   8864 	sem = swfwphysem[sc->sc_funcid];
   8865 	if (wm_get_swfw_semaphore(sc, sem)) {
   8866 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8867 		    __func__);
   8868 		return;
   8869 	}
   8870 
   8871 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8872 
   8873 	wm_put_swfw_semaphore(sc, sem);
   8874 }
   8875 
   8876 /*
   8877  * wm_gmii_gs40g_readreg:	[mii interface function]
   8878  *
   8879  *	Read a PHY register on the I210 and I211.
   8880  * This could be handled by the PHY layer if we didn't have to lock the
   8881  * resource ...
   8882  */
   8883 static int
   8884 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8885 {
   8886 	struct wm_softc *sc = device_private(self);
   8887 	int sem;
   8888 	int page, offset;
   8889 	int rv;
   8890 
   8891 	/* Acquire semaphore */
   8892 	sem = swfwphysem[sc->sc_funcid];
   8893 	if (wm_get_swfw_semaphore(sc, sem)) {
   8894 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8895 		    __func__);
   8896 		return 0;
   8897 	}
   8898 
   8899 	/* Page select */
   8900 	page = reg >> GS40G_PAGE_SHIFT;
   8901 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8902 
   8903 	/* Read reg */
   8904 	offset = reg & GS40G_OFFSET_MASK;
   8905 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8906 
   8907 	wm_put_swfw_semaphore(sc, sem);
   8908 	return rv;
   8909 }
   8910 
   8911 /*
   8912  * wm_gmii_gs40g_writereg:	[mii interface function]
   8913  *
   8914  *	Write a PHY register on the I210 and I211.
   8915  * This could be handled by the PHY layer if we didn't have to lock the
   8916  * resource ...
   8917  */
   8918 static void
   8919 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8920 {
   8921 	struct wm_softc *sc = device_private(self);
   8922 	int sem;
   8923 	int page, offset;
   8924 
   8925 	/* Acquire semaphore */
   8926 	sem = swfwphysem[sc->sc_funcid];
   8927 	if (wm_get_swfw_semaphore(sc, sem)) {
   8928 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8929 		    __func__);
   8930 		return;
   8931 	}
   8932 
   8933 	/* Page select */
   8934 	page = reg >> GS40G_PAGE_SHIFT;
   8935 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8936 
   8937 	/* Write reg */
   8938 	offset = reg & GS40G_OFFSET_MASK;
   8939 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8940 
   8941 	/* Release semaphore */
   8942 	wm_put_swfw_semaphore(sc, sem);
   8943 }
   8944 
   8945 /*
   8946  * wm_gmii_statchg:	[mii interface function]
   8947  *
   8948  *	Callback from MII layer when media changes.
   8949  */
   8950 static void
   8951 wm_gmii_statchg(struct ifnet *ifp)
   8952 {
   8953 	struct wm_softc *sc = ifp->if_softc;
   8954 	struct mii_data *mii = &sc->sc_mii;
   8955 
   8956 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8957 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8958 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8959 
   8960 	/*
   8961 	 * Get flow control negotiation result.
   8962 	 */
   8963 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8964 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8965 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8966 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8967 	}
   8968 
   8969 	if (sc->sc_flowflags & IFM_FLOW) {
   8970 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8971 			sc->sc_ctrl |= CTRL_TFCE;
   8972 			sc->sc_fcrtl |= FCRTL_XONE;
   8973 		}
   8974 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8975 			sc->sc_ctrl |= CTRL_RFCE;
   8976 	}
   8977 
   8978 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8979 		DPRINTF(WM_DEBUG_LINK,
   8980 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8981 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8982 	} else {
   8983 		DPRINTF(WM_DEBUG_LINK,
   8984 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8985 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8986 	}
   8987 
   8988 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8989 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8990 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8991 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8992 	if (sc->sc_type == WM_T_80003) {
   8993 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8994 		case IFM_1000_T:
   8995 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8996 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8997 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8998 			break;
   8999 		default:
   9000 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9001 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9002 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9003 			break;
   9004 		}
   9005 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9006 	}
   9007 }
   9008 
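        /*
         * A note on the statchg handler above: the negotiated flow-control
         * result is pushed into three places.  CTRL_TFCE and CTRL_RFCE
         * enable transmit and receive flow control respectively, FCRTL_XONE
         * arms XON frame generation, and TCTL_COLD sets the collision
         * distance, which differs between full and half duplex.
         */
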
   9009 /*
   9010  * wm_kmrn_readreg:
   9011  *
   9012  *	Read a Kumeran register
   9013  */
   9014 static int
   9015 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9016 {
   9017 	int rv;
   9018 
   9019 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9020 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   9021 			aprint_error_dev(sc->sc_dev,
   9022 			    "%s: failed to get semaphore\n", __func__);
   9023 			return 0;
   9024 		}
   9025 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9026 		if (wm_get_swfwhw_semaphore(sc)) {
   9027 			aprint_error_dev(sc->sc_dev,
   9028 			    "%s: failed to get semaphore\n", __func__);
   9029 			return 0;
   9030 		}
   9031 	}
   9032 
   9033 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9034 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9035 	    KUMCTRLSTA_REN);
   9036 	CSR_WRITE_FLUSH(sc);
   9037 	delay(2);
   9038 
   9039 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9040 
   9041 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9042 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9043 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9044 		wm_put_swfwhw_semaphore(sc);
   9045 
   9046 	return rv;
   9047 }
   9048 
   9049 /*
   9050  * wm_kmrn_writereg:
   9051  *
   9052  *	Write a Kumeran register
   9053  */
   9054 static void
   9055 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9056 {
   9057 
   9058 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9059 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   9060 			aprint_error_dev(sc->sc_dev,
   9061 			    "%s: failed to get semaphore\n", __func__);
   9062 			return;
   9063 		}
   9064 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9065 		if (wm_get_swfwhw_semaphore(sc)) {
   9066 			aprint_error_dev(sc->sc_dev,
   9067 			    "%s: failed to get semaphore\n", __func__);
   9068 			return;
   9069 		}
   9070 	}
   9071 
   9072 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9073 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9074 	    (val & KUMCTRLSTA_MASK));
   9075 
   9076 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9077 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9078 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9079 		wm_put_swfwhw_semaphore(sc);
   9080 }
   9081 
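        /*
         * The Kumeran accessors above multiplex everything through the
         * single KUMCTRLSTA register: the register offset is shifted into
         * the KUMCTRLSTA_OFFSET field, KUMCTRLSTA_REN requests a read, and
         * the data travels in the low bits covered by KUMCTRLSTA_MASK.
         * A sketch of a read, exactly as wm_kmrn_readreg() does it:
         *
         *	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
         *	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
         *	    KUMCTRLSTA_REN);
         *	delay(2);
         *	val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
         */
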
   9082 /* SGMII related */
   9083 
   9084 /*
   9085  * wm_sgmii_uses_mdio
   9086  *
   9087  * Check whether the transaction is to the internal PHY or the external
   9088  * MDIO interface. Return true if it's MDIO.
   9089  */
   9090 static bool
   9091 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9092 {
   9093 	uint32_t reg;
   9094 	bool ismdio = false;
   9095 
   9096 	switch (sc->sc_type) {
   9097 	case WM_T_82575:
   9098 	case WM_T_82576:
   9099 		reg = CSR_READ(sc, WMREG_MDIC);
   9100 		ismdio = ((reg & MDIC_DEST) != 0);
   9101 		break;
   9102 	case WM_T_82580:
   9103 	case WM_T_I350:
   9104 	case WM_T_I354:
   9105 	case WM_T_I210:
   9106 	case WM_T_I211:
   9107 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9108 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9109 		break;
   9110 	default:
   9111 		break;
   9112 	}
   9113 
   9114 	return ismdio;
   9115 }
   9116 
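        /*
         * The DEST bit consulted above (MDIC_DEST on the 82575/82576,
         * MDICNFG_DEST on the 82580 and newer) steers management
         * transactions either to the internal PHY or to the external MDIO
         * pins; the SGMII read/write functions below are only used when the
         * transaction goes over the I2C interface instead.
         */
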
   9117 /*
   9118  * wm_sgmii_readreg:	[mii interface function]
   9119  *
   9120  *	Read a PHY register on the SGMII
   9121  * This could be handled by the PHY layer if we didn't have to lock the
   9122  * resource ...
   9123  */
   9124 static int
   9125 wm_sgmii_readreg(device_t self, int phy, int reg)
   9126 {
   9127 	struct wm_softc *sc = device_private(self);
   9128 	uint32_t i2ccmd;
   9129 	int i, rv;
   9130 
   9131 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9132 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9133 		    __func__);
   9134 		return 0;
   9135 	}
   9136 
   9137 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9138 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9139 	    | I2CCMD_OPCODE_READ;
   9140 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9141 
   9142 	/* Poll the ready bit */
   9143 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9144 		delay(50);
   9145 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9146 		if (i2ccmd & I2CCMD_READY)
   9147 			break;
   9148 	}
   9149 	if ((i2ccmd & I2CCMD_READY) == 0)
   9150 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9151 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9152 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9153 
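        	/* Swap the data bytes for the I2C interface */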
   9154 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9155 
   9156 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9157 	return rv;
   9158 }
   9159 
   9160 /*
   9161  * wm_sgmii_writereg:	[mii interface function]
   9162  *
   9163  *	Write a PHY register on the SGMII.
   9164  * This could be handled by the PHY layer if we didn't have to lock the
   9165  * resource ...
   9166  */
   9167 static void
   9168 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9169 {
   9170 	struct wm_softc *sc = device_private(self);
   9171 	uint32_t i2ccmd;
   9172 	int i;
   9173 	int val_swapped;
   9174 
   9175 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9176 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9177 		    __func__);
   9178 		return;
   9179 	}
   9180 	/* Swap the data bytes for the I2C interface */
   9181 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9182 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9183 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9184 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9185 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9186 
   9187 	/* Poll the ready bit */
   9188 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9189 		delay(50);
   9190 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9191 		if (i2ccmd & I2CCMD_READY)
   9192 			break;
   9193 	}
   9194 	if ((i2ccmd & I2CCMD_READY) == 0)
   9195 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9196 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9197 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9198 
   9199 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9200 }
   9201 
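        /*
         * Both SGMII accessors above drive the PHY through the I2CCMD
         * register: the register and PHY addresses are shifted into their
         * fields along with a read or write opcode, the I2CCMD_READY bit is
         * polled (up to I2CCMD_PHY_TIMEOUT iterations of 50us each), and
         * the 16-bit payload is carried byte-swapped, hence the swap on
         * both paths.
         */
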
   9202 /* TBI related */
   9203 
   9204 /*
   9205  * wm_tbi_mediainit:
   9206  *
   9207  *	Initialize media for use on 1000BASE-X devices.
   9208  */
   9209 static void
   9210 wm_tbi_mediainit(struct wm_softc *sc)
   9211 {
   9212 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9213 	const char *sep = "";
   9214 
   9215 	if (sc->sc_type < WM_T_82543)
   9216 		sc->sc_tipg = TIPG_WM_DFLT;
   9217 	else
   9218 		sc->sc_tipg = TIPG_LG_DFLT;
   9219 
   9220 	sc->sc_tbi_serdes_anegticks = 5;
   9221 
   9222 	/* Initialize our media structures */
   9223 	sc->sc_mii.mii_ifp = ifp;
   9224 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9225 
   9226 	if ((sc->sc_type >= WM_T_82575)
   9227 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9228 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9229 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9230 	else
   9231 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9232 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9233 
   9234 	/*
   9235 	 * SWD Pins:
   9236 	 *
   9237 	 *	0 = Link LED (output)
   9238 	 *	1 = Loss Of Signal (input)
   9239 	 */
   9240 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9241 
   9242 	/* XXX Perhaps this is only for TBI */
   9243 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9244 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9245 
   9246 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9247 		sc->sc_ctrl &= ~CTRL_LRST;
   9248 
   9249 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9250 
   9251 #define	ADD(ss, mm, dd)							\
   9252 do {									\
   9253 	aprint_normal("%s%s", sep, ss);					\
   9254 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9255 	sep = ", ";							\
   9256 } while (/*CONSTCOND*/0)
   9257 
   9258 	aprint_normal_dev(sc->sc_dev, "");
   9259 
   9260 	/* Only 82545 is LX */
   9261 	if (sc->sc_type == WM_T_82545) {
   9262 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9263 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9264 	} else {
   9265 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9266 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9267 	}
   9268 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9269 	aprint_normal("\n");
   9270 
   9271 #undef ADD
   9272 
   9273 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9274 }
   9275 
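        /*
         * For TBI/1000BASE-X there is no MII PHY to probe, so the abilities
         * selected above are advertised directly through the TXCW register
         * in wm_tbi_mediachange() below: TXCW_ANE enables autonegotiation,
         * and TXCW_FD/TXCW_HD plus the pause bits mirror the chosen ifmedia
         * entry.
         */
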
   9276 /*
   9277  * wm_tbi_mediachange:	[ifmedia interface function]
   9278  *
   9279  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9280  */
   9281 static int
   9282 wm_tbi_mediachange(struct ifnet *ifp)
   9283 {
   9284 	struct wm_softc *sc = ifp->if_softc;
   9285 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9286 	uint32_t status;
   9287 	int i;
   9288 
   9289 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9290 		/* XXX need some work for >= 82571 and < 82575 */
   9291 		if (sc->sc_type < WM_T_82575)
   9292 			return 0;
   9293 	}
   9294 
   9295 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9296 	    || (sc->sc_type >= WM_T_82575))
   9297 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9298 
   9299 	sc->sc_ctrl &= ~CTRL_LRST;
   9300 	sc->sc_txcw = TXCW_ANE;
   9301 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9302 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9303 	else if (ife->ifm_media & IFM_FDX)
   9304 		sc->sc_txcw |= TXCW_FD;
   9305 	else
   9306 		sc->sc_txcw |= TXCW_HD;
   9307 
   9308 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9309 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9310 
   9311 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9312 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9313 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9314 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9315 	CSR_WRITE_FLUSH(sc);
   9316 	delay(1000);
   9317 
   9318 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9319 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9320 
   9321 	/*
   9322 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
   9323 	 * optics detect a signal; on older chips the sense is inverted.
   9324 	 */
   9325 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9326 		/* Have signal; wait for the link to come up. */
   9327 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9328 			delay(10000);
   9329 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9330 				break;
   9331 		}
   9332 
   9333 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9334 			    device_xname(sc->sc_dev),i));
   9335 
   9336 		status = CSR_READ(sc, WMREG_STATUS);
   9337 		DPRINTF(WM_DEBUG_LINK,
   9338 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9339 			device_xname(sc->sc_dev),status, STATUS_LU));
   9340 		if (status & STATUS_LU) {
   9341 			/* Link is up. */
   9342 			DPRINTF(WM_DEBUG_LINK,
   9343 			    ("%s: LINK: set media -> link up %s\n",
   9344 			    device_xname(sc->sc_dev),
   9345 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9346 
   9347 			/*
   9348 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9349 			 * so we should update sc->sc_ctrl
   9350 			 */
   9351 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9352 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9353 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9354 			if (status & STATUS_FD)
   9355 				sc->sc_tctl |=
   9356 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9357 			else
   9358 				sc->sc_tctl |=
   9359 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9360 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9361 				sc->sc_fcrtl |= FCRTL_XONE;
   9362 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9363 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9364 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9365 				      sc->sc_fcrtl);
   9366 			sc->sc_tbi_linkup = 1;
   9367 		} else {
   9368 			if (i == WM_LINKUP_TIMEOUT)
   9369 				wm_check_for_link(sc);
   9370 			/* Link is down. */
   9371 			DPRINTF(WM_DEBUG_LINK,
   9372 			    ("%s: LINK: set media -> link down\n",
   9373 			    device_xname(sc->sc_dev)));
   9374 			sc->sc_tbi_linkup = 0;
   9375 		}
   9376 	} else {
   9377 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9378 		    device_xname(sc->sc_dev)));
   9379 		sc->sc_tbi_linkup = 0;
   9380 	}
   9381 
   9382 	wm_tbi_serdes_set_linkled(sc);
   9383 
   9384 	return 0;
   9385 }
   9386 
   9387 /*
   9388  * wm_tbi_mediastatus:	[ifmedia interface function]
   9389  *
   9390  *	Get the current interface media status on a 1000BASE-X device.
   9391  */
   9392 static void
   9393 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9394 {
   9395 	struct wm_softc *sc = ifp->if_softc;
   9396 	uint32_t ctrl, status;
   9397 
   9398 	ifmr->ifm_status = IFM_AVALID;
   9399 	ifmr->ifm_active = IFM_ETHER;
   9400 
   9401 	status = CSR_READ(sc, WMREG_STATUS);
   9402 	if ((status & STATUS_LU) == 0) {
   9403 		ifmr->ifm_active |= IFM_NONE;
   9404 		return;
   9405 	}
   9406 
   9407 	ifmr->ifm_status |= IFM_ACTIVE;
   9408 	/* Only 82545 is LX */
   9409 	if (sc->sc_type == WM_T_82545)
   9410 		ifmr->ifm_active |= IFM_1000_LX;
   9411 	else
   9412 		ifmr->ifm_active |= IFM_1000_SX;
   9413 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9414 		ifmr->ifm_active |= IFM_FDX;
   9415 	else
   9416 		ifmr->ifm_active |= IFM_HDX;
   9417 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9418 	if (ctrl & CTRL_RFCE)
   9419 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9420 	if (ctrl & CTRL_TFCE)
   9421 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9422 }
   9423 
   9424 /* XXX TBI only */
   9425 static int
   9426 wm_check_for_link(struct wm_softc *sc)
   9427 {
   9428 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9429 	uint32_t rxcw;
   9430 	uint32_t ctrl;
   9431 	uint32_t status;
   9432 	uint32_t sig;
   9433 
   9434 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9435 		/* XXX need some work for >= 82571 */
   9436 		if (sc->sc_type >= WM_T_82571) {
   9437 			sc->sc_tbi_linkup = 1;
   9438 			return 0;
   9439 		}
   9440 	}
   9441 
   9442 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9443 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9444 	status = CSR_READ(sc, WMREG_STATUS);
   9445 
   9446 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9447 
   9448 	DPRINTF(WM_DEBUG_LINK,
   9449 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9450 		device_xname(sc->sc_dev), __func__,
   9451 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9452 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9453 
   9454 	/*
   9455 	 * SWDPIN   LU RXCW
   9456 	 *      0    0    0
   9457 	 *      0    0    1	(should not happen)
   9458 	 *      0    1    0	(should not happen)
   9459 	 *      0    1    1	(should not happen)
   9460 	 *      1    0    0	Disable autonego and force linkup
   9461 	 *      1    0    1	got /C/ but not linkup yet
   9462 	 *      1    1    0	(linkup)
   9463 	 *      1    1    1	If IFM_AUTO, back to autonego
   9464 	 *
   9465 	 */
   9466 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9467 	    && ((status & STATUS_LU) == 0)
   9468 	    && ((rxcw & RXCW_C) == 0)) {
   9469 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9470 			__func__));
   9471 		sc->sc_tbi_linkup = 0;
   9472 		/* Disable auto-negotiation in the TXCW register */
   9473 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9474 
   9475 		/*
   9476 		 * Force link-up and also force full-duplex.
   9477 		 *
   9478 		 * NOTE: the hardware updates TFCE and RFCE in CTRL automatically,
   9479 		 * so we should update sc->sc_ctrl
   9480 		 */
   9481 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9482 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9483 	} else if (((status & STATUS_LU) != 0)
   9484 	    && ((rxcw & RXCW_C) != 0)
   9485 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9486 		sc->sc_tbi_linkup = 1;
   9487 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9488 			__func__));
   9489 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9490 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9491 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9492 	    && ((rxcw & RXCW_C) != 0)) {
   9493 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9494 	} else {
   9495 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9496 			status));
   9497 	}
   9498 
   9499 	return 0;
   9500 }
   9501 
   9502 /*
   9503  * wm_tbi_tick:
   9504  *
   9505  *	Check the link on TBI devices.
   9506  *	This function acts as mii_tick().
   9507  */
   9508 static void
   9509 wm_tbi_tick(struct wm_softc *sc)
   9510 {
   9511 	struct mii_data *mii = &sc->sc_mii;
   9512 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9513 	uint32_t status;
   9514 
   9515 	KASSERT(WM_CORE_LOCKED(sc));
   9516 
   9517 	status = CSR_READ(sc, WMREG_STATUS);
   9518 
   9519 	/* XXX is this needed? */
   9520 	(void)CSR_READ(sc, WMREG_RXCW);
   9521 	(void)CSR_READ(sc, WMREG_CTRL);
   9522 
   9523 	/* set link status */
   9524 	if ((status & STATUS_LU) == 0) {
   9525 		DPRINTF(WM_DEBUG_LINK,
   9526 		    ("%s: LINK: checklink -> down\n",
   9527 			device_xname(sc->sc_dev)));
   9528 		sc->sc_tbi_linkup = 0;
   9529 	} else if (sc->sc_tbi_linkup == 0) {
   9530 		DPRINTF(WM_DEBUG_LINK,
   9531 		    ("%s: LINK: checklink -> up %s\n",
   9532 			device_xname(sc->sc_dev),
   9533 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9534 		sc->sc_tbi_linkup = 1;
   9535 		sc->sc_tbi_serdes_ticks = 0;
   9536 	}
   9537 
   9538 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9539 		goto setled;
   9540 
   9541 	if ((status & STATUS_LU) == 0) {
   9542 		sc->sc_tbi_linkup = 0;
   9543 		/* If the timer expired, retry autonegotiation */
   9544 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9545 		    && (++sc->sc_tbi_serdes_ticks
   9546 			>= sc->sc_tbi_serdes_anegticks)) {
   9547 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9548 			sc->sc_tbi_serdes_ticks = 0;
   9549 			/*
   9550 			 * Reset the link, and let autonegotiation do
   9551 			 * its thing
   9552 			 */
   9553 			sc->sc_ctrl |= CTRL_LRST;
   9554 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9555 			CSR_WRITE_FLUSH(sc);
   9556 			delay(1000);
   9557 			sc->sc_ctrl &= ~CTRL_LRST;
   9558 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9559 			CSR_WRITE_FLUSH(sc);
   9560 			delay(1000);
   9561 			CSR_WRITE(sc, WMREG_TXCW,
   9562 			    sc->sc_txcw & ~TXCW_ANE);
   9563 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9564 		}
   9565 	}
   9566 
   9567 setled:
   9568 	wm_tbi_serdes_set_linkled(sc);
   9569 }
   9570 
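        /*
         * Both wm_tbi_tick() above and wm_serdes_tick() below share the
         * same retry scheme: while the link is down and IFM_AUTO is
         * selected, sc_tbi_serdes_ticks counts tick callbacks, and once it
         * reaches sc_tbi_serdes_anegticks (5, set in wm_tbi_mediainit())
         * the link is reset (TBI) or the media reprogrammed (SERDES) so
         * that autonegotiation starts over.
         */
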
   9571 /* SERDES related */
   9572 static void
   9573 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9574 {
   9575 	uint32_t reg;
   9576 
   9577 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9578 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9579 		return;
   9580 
   9581 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9582 	reg |= PCS_CFG_PCS_EN;
   9583 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9584 
   9585 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9586 	reg &= ~CTRL_EXT_SWDPIN(3);
   9587 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9588 	CSR_WRITE_FLUSH(sc);
   9589 }
   9590 
   9591 static int
   9592 wm_serdes_mediachange(struct ifnet *ifp)
   9593 {
   9594 	struct wm_softc *sc = ifp->if_softc;
   9595 	bool pcs_autoneg = true; /* XXX */
   9596 	uint32_t ctrl_ext, pcs_lctl, reg;
   9597 
   9598 	/* XXX Currently, this function is not called on 8257[12] */
   9599 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9600 	    || (sc->sc_type >= WM_T_82575))
   9601 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9602 
   9603 	wm_serdes_power_up_link_82575(sc);
   9604 
   9605 	sc->sc_ctrl |= CTRL_SLU;
   9606 
   9607 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9608 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9609 
   9610 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9611 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9612 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9613 	case CTRL_EXT_LINK_MODE_SGMII:
   9614 		pcs_autoneg = true;
   9615 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9616 		break;
   9617 	case CTRL_EXT_LINK_MODE_1000KX:
   9618 		pcs_autoneg = false;
   9619 		/* FALLTHROUGH */
   9620 	default:
   9621 		if ((sc->sc_type == WM_T_82575)
   9622 		    || (sc->sc_type == WM_T_82576)) {
   9623 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9624 				pcs_autoneg = false;
   9625 		}
   9626 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9627 		    | CTRL_FRCFDX;
   9628 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9629 	}
   9630 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9631 
   9632 	if (pcs_autoneg) {
   9633 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9634 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9635 
   9636 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9637 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9638 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9639 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9640 	} else
   9641 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9642 
   9643 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   9644 
   9646 	return 0;
   9647 }
   9648 
   9649 static void
   9650 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9651 {
   9652 	struct wm_softc *sc = ifp->if_softc;
   9653 	struct mii_data *mii = &sc->sc_mii;
   9654 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9655 	uint32_t pcs_adv, pcs_lpab, reg;
   9656 
   9657 	ifmr->ifm_status = IFM_AVALID;
   9658 	ifmr->ifm_active = IFM_ETHER;
   9659 
   9660 	/* Check PCS */
   9661 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9662 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9663 		ifmr->ifm_active |= IFM_NONE;
   9664 		sc->sc_tbi_linkup = 0;
   9665 		goto setled;
   9666 	}
   9667 
   9668 	sc->sc_tbi_linkup = 1;
   9669 	ifmr->ifm_status |= IFM_ACTIVE;
   9670 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9671 	if ((reg & PCS_LSTS_FDX) != 0)
   9672 		ifmr->ifm_active |= IFM_FDX;
   9673 	else
   9674 		ifmr->ifm_active |= IFM_HDX;
   9675 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9676 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9677 		/* Check flow */
   9678 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9679 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9680 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9681 			goto setled;
   9682 		}
   9683 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9684 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9685 		DPRINTF(WM_DEBUG_LINK,
   9686 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9687 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9688 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9689 			mii->mii_media_active |= IFM_FLOW
   9690 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9691 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9692 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9693 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9694 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9695 			mii->mii_media_active |= IFM_FLOW
   9696 			    | IFM_ETH_TXPAUSE;
   9697 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9698 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9699 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9700 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9701 			mii->mii_media_active |= IFM_FLOW
   9702 			    | IFM_ETH_RXPAUSE;
   9703 		} else {
   9704 		}
   9705 	}
   9706 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9707 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9708 setled:
   9709 	wm_tbi_serdes_set_linkled(sc);
   9710 }
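
/*
 * The pause resolution above follows IEEE 802.3 Annex 28B.3.  With the
 * local advertisement (pcs_adv) and link partner ability (pcs_lpab)
 * bits, the outcome is:
 *
 *	adv SYM  adv ASYM  lpab SYM  lpab ASYM	result
 *	   1	    x	      1	        x	TX and RX pause
 *	   0	    1	      1	        1	TX pause only
 *	   1	    1	      0	        1	RX pause only
 *	(anything else)				no flow control
 */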
   9711 
   9712 /*
   9713  * wm_serdes_tick:
   9714  *
   9715  *	Check the link on serdes devices.
   9716  */
   9717 static void
   9718 wm_serdes_tick(struct wm_softc *sc)
   9719 {
   9720 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9721 	struct mii_data *mii = &sc->sc_mii;
   9722 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9723 	uint32_t reg;
   9724 
   9725 	KASSERT(WM_CORE_LOCKED(sc));
   9726 
   9727 	mii->mii_media_status = IFM_AVALID;
   9728 	mii->mii_media_active = IFM_ETHER;
   9729 
   9730 	/* Check PCS */
   9731 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9732 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9733 		mii->mii_media_status |= IFM_ACTIVE;
   9734 		sc->sc_tbi_linkup = 1;
   9735 		sc->sc_tbi_serdes_ticks = 0;
   9736 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9737 		if ((reg & PCS_LSTS_FDX) != 0)
   9738 			mii->mii_media_active |= IFM_FDX;
   9739 		else
   9740 			mii->mii_media_active |= IFM_HDX;
   9741 	} else {
    9742 		mii->mii_media_active |= IFM_NONE;
   9743 		sc->sc_tbi_linkup = 0;
    9744 		/* If the timer expired, retry autonegotiation */
   9745 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9746 		    && (++sc->sc_tbi_serdes_ticks
   9747 			>= sc->sc_tbi_serdes_anegticks)) {
   9748 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9749 			sc->sc_tbi_serdes_ticks = 0;
   9750 			/* XXX */
   9751 			wm_serdes_mediachange(ifp);
   9752 		}
   9753 	}
   9754 
   9755 	wm_tbi_serdes_set_linkled(sc);
   9756 }
   9757 
   9758 /* SFP related */
   9759 
   9760 static int
   9761 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9762 {
   9763 	uint32_t i2ccmd;
   9764 	int i;
   9765 
   9766 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9767 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9768 
   9769 	/* Poll the ready bit */
   9770 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9771 		delay(50);
   9772 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9773 		if (i2ccmd & I2CCMD_READY)
   9774 			break;
   9775 	}
   9776 	if ((i2ccmd & I2CCMD_READY) == 0)
   9777 		return -1;
   9778 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9779 		return -1;
   9780 
   9781 	*data = i2ccmd & 0x00ff;
   9782 
   9783 	return 0;
   9784 }
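
/*
 * Illustrative usage sketch (comment only, not compiled): reading one
 * byte of SFP module data with the helper above.  The I2C interface
 * must already be enabled via CTRL_EXT_I2C_ENA, as
 * wm_sfp_get_media_type() below arranges:
 *
 *	uint8_t id;
 *
 *	if (wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &id) == 0)
 *		aprint_debug_dev(sc->sc_dev, "SFP ID byte: 0x%02x\n", id);
 */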
   9785 
   9786 static uint32_t
   9787 wm_sfp_get_media_type(struct wm_softc *sc)
   9788 {
   9789 	uint32_t ctrl_ext;
   9790 	uint8_t val = 0;
   9791 	int timeout = 3;
   9792 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9793 	int rv = -1;
   9794 
   9795 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9796 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9797 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9798 	CSR_WRITE_FLUSH(sc);
   9799 
   9800 	/* Read SFP module data */
   9801 	while (timeout) {
   9802 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9803 		if (rv == 0)
   9804 			break;
   9805 		delay(100*1000); /* XXX too big */
   9806 		timeout--;
   9807 	}
   9808 	if (rv != 0)
   9809 		goto out;
   9810 	switch (val) {
   9811 	case SFF_SFP_ID_SFF:
   9812 		aprint_normal_dev(sc->sc_dev,
   9813 		    "Module/Connector soldered to board\n");
   9814 		break;
   9815 	case SFF_SFP_ID_SFP:
   9816 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9817 		break;
   9818 	case SFF_SFP_ID_UNKNOWN:
   9819 		goto out;
   9820 	default:
   9821 		break;
   9822 	}
   9823 
   9824 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9825 	if (rv != 0) {
   9826 		goto out;
   9827 	}
   9828 
   9829 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9830 		mediatype = WM_MEDIATYPE_SERDES;
    9831 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   9832 		sc->sc_flags |= WM_F_SGMII;
   9833 		mediatype = WM_MEDIATYPE_COPPER;
    9834 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   9835 		sc->sc_flags |= WM_F_SGMII;
   9836 		mediatype = WM_MEDIATYPE_SERDES;
   9837 	}
   9838 
   9839 out:
   9840 	/* Restore I2C interface setting */
   9841 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9842 
   9843 	return mediatype;
   9844 }
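
/*
 * The flags word examined above is the SFF-8472 Ethernet compliance
 * byte: 1000BASE-SX/LX modules are handled as SERDES media,
 * 1000BASE-T modules as copper behind SGMII, and 100BASE-FX modules
 * as SERDES media behind SGMII.
 */
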
   9845 /*
   9846  * NVM related.
   9847  * Microwire, SPI (w/wo EERD) and Flash.
   9848  */
   9849 
    9850 /* Both SPI and uwire */
   9851 
   9852 /*
   9853  * wm_eeprom_sendbits:
   9854  *
   9855  *	Send a series of bits to the EEPROM.
   9856  */
   9857 static void
   9858 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9859 {
   9860 	uint32_t reg;
   9861 	int x;
   9862 
   9863 	reg = CSR_READ(sc, WMREG_EECD);
   9864 
   9865 	for (x = nbits; x > 0; x--) {
   9866 		if (bits & (1U << (x - 1)))
   9867 			reg |= EECD_DI;
   9868 		else
   9869 			reg &= ~EECD_DI;
   9870 		CSR_WRITE(sc, WMREG_EECD, reg);
   9871 		CSR_WRITE_FLUSH(sc);
   9872 		delay(2);
   9873 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9874 		CSR_WRITE_FLUSH(sc);
   9875 		delay(2);
   9876 		CSR_WRITE(sc, WMREG_EECD, reg);
   9877 		CSR_WRITE_FLUSH(sc);
   9878 		delay(2);
   9879 	}
   9880 }
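
/*
 * Bits are clocked out MSB first: DI is set up, then SK is pulsed with
 * roughly 2us hold times.  Sending the 3-bit Microwire READ opcode
 * (110b), for instance, drives DI high, high, then low across three
 * SK pulses.
 */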
   9881 
   9882 /*
   9883  * wm_eeprom_recvbits:
   9884  *
   9885  *	Receive a series of bits from the EEPROM.
   9886  */
   9887 static void
   9888 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9889 {
   9890 	uint32_t reg, val;
   9891 	int x;
   9892 
   9893 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9894 
   9895 	val = 0;
   9896 	for (x = nbits; x > 0; x--) {
   9897 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9898 		CSR_WRITE_FLUSH(sc);
   9899 		delay(2);
   9900 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9901 			val |= (1U << (x - 1));
   9902 		CSR_WRITE(sc, WMREG_EECD, reg);
   9903 		CSR_WRITE_FLUSH(sc);
   9904 		delay(2);
   9905 	}
   9906 	*valp = val;
   9907 }
   9908 
   9909 /* Microwire */
   9910 
   9911 /*
   9912  * wm_nvm_read_uwire:
   9913  *
   9914  *	Read a word from the EEPROM using the MicroWire protocol.
   9915  */
   9916 static int
   9917 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9918 {
   9919 	uint32_t reg, val;
   9920 	int i;
   9921 
   9922 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   9923 		device_xname(sc->sc_dev), __func__));
   9924 
   9925 	for (i = 0; i < wordcnt; i++) {
   9926 		/* Clear SK and DI. */
   9927 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9928 		CSR_WRITE(sc, WMREG_EECD, reg);
   9929 
   9930 		/*
   9931 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9932 		 * and Xen.
   9933 		 *
    9934 		 * We use this workaround only for the 82540 because qemu's
    9935 		 * e1000 acts as an 82540.
   9936 		 */
   9937 		if (sc->sc_type == WM_T_82540) {
   9938 			reg |= EECD_SK;
   9939 			CSR_WRITE(sc, WMREG_EECD, reg);
   9940 			reg &= ~EECD_SK;
   9941 			CSR_WRITE(sc, WMREG_EECD, reg);
   9942 			CSR_WRITE_FLUSH(sc);
   9943 			delay(2);
   9944 		}
   9945 		/* XXX: end of workaround */
   9946 
   9947 		/* Set CHIP SELECT. */
   9948 		reg |= EECD_CS;
   9949 		CSR_WRITE(sc, WMREG_EECD, reg);
   9950 		CSR_WRITE_FLUSH(sc);
   9951 		delay(2);
   9952 
   9953 		/* Shift in the READ command. */
   9954 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9955 
   9956 		/* Shift in address. */
   9957 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9958 
   9959 		/* Shift out the data. */
   9960 		wm_eeprom_recvbits(sc, &val, 16);
   9961 		data[i] = val & 0xffff;
   9962 
   9963 		/* Clear CHIP SELECT. */
   9964 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9965 		CSR_WRITE(sc, WMREG_EECD, reg);
   9966 		CSR_WRITE_FLUSH(sc);
   9967 		delay(2);
   9968 	}
   9969 
   9970 	return 0;
   9971 }
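
/*
 * Illustrative sketch (comment only): callers normally go through
 * wm_nvm_read(), which takes the NVM lock and dispatches here for
 * Microwire parts, e.g. to fetch the three station address words:
 *
 *	uint16_t myea[3];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) == 0)
 *		... myea[0..2] now hold the Ethernet address ...
 */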
   9972 
   9973 /* SPI */
   9974 
   9975 /*
   9976  * Set SPI and FLASH related information from the EECD register.
   9977  * For 82541 and 82547, the word size is taken from EEPROM.
   9978  */
   9979 static int
   9980 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9981 {
   9982 	int size;
   9983 	uint32_t reg;
   9984 	uint16_t data;
   9985 
   9986 	reg = CSR_READ(sc, WMREG_EECD);
   9987 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9988 
   9989 	/* Read the size of NVM from EECD by default */
   9990 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9991 	switch (sc->sc_type) {
   9992 	case WM_T_82541:
   9993 	case WM_T_82541_2:
   9994 	case WM_T_82547:
   9995 	case WM_T_82547_2:
   9996 		/* Set dummy value to access EEPROM */
   9997 		sc->sc_nvm_wordsize = 64;
   9998 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9999 		reg = data;
   10000 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10001 		if (size == 0)
   10002 			size = 6; /* 64 word size */
   10003 		else
   10004 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10005 		break;
   10006 	case WM_T_80003:
   10007 	case WM_T_82571:
   10008 	case WM_T_82572:
   10009 	case WM_T_82573: /* SPI case */
   10010 	case WM_T_82574: /* SPI case */
   10011 	case WM_T_82583: /* SPI case */
   10012 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10013 		if (size > 14)
   10014 			size = 14;
   10015 		break;
   10016 	case WM_T_82575:
   10017 	case WM_T_82576:
   10018 	case WM_T_82580:
   10019 	case WM_T_I350:
   10020 	case WM_T_I354:
   10021 	case WM_T_I210:
   10022 	case WM_T_I211:
   10023 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10024 		if (size > 15)
   10025 			size = 15;
   10026 		break;
   10027 	default:
   10028 		aprint_error_dev(sc->sc_dev,
   10029 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    10030 		return -1;
   10032 	}
   10033 
   10034 	sc->sc_nvm_wordsize = 1 << size;
   10035 
   10036 	return 0;
   10037 }
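
/*
 * Worked example of the computation above (an assumed 82571 part): if
 * the EECD size field reads 2, size becomes 2 + NVM_WORD_SIZE_BASE_SHIFT
 * (6) = 8, so sc_nvm_wordsize = 1 << 8 = 256 words.
 */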
   10038 
   10039 /*
   10040  * wm_nvm_ready_spi:
   10041  *
   10042  *	Wait for a SPI EEPROM to be ready for commands.
   10043  */
   10044 static int
   10045 wm_nvm_ready_spi(struct wm_softc *sc)
   10046 {
   10047 	uint32_t val;
   10048 	int usec;
   10049 
   10050 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10051 		device_xname(sc->sc_dev), __func__));
   10052 
   10053 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10054 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10055 		wm_eeprom_recvbits(sc, &val, 8);
   10056 		if ((val & SPI_SR_RDY) == 0)
   10057 			break;
   10058 	}
   10059 	if (usec >= SPI_MAX_RETRIES) {
    10060 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   10061 		return 1;
   10062 	}
   10063 	return 0;
   10064 }
   10065 
   10066 /*
   10067  * wm_nvm_read_spi:
   10068  *
    10069  *	Read a word from the EEPROM using the SPI protocol.
   10070  */
   10071 static int
   10072 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10073 {
   10074 	uint32_t reg, val;
   10075 	int i;
   10076 	uint8_t opc;
   10077 
   10078 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10079 		device_xname(sc->sc_dev), __func__));
   10080 
   10081 	/* Clear SK and CS. */
   10082 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10083 	CSR_WRITE(sc, WMREG_EECD, reg);
   10084 	CSR_WRITE_FLUSH(sc);
   10085 	delay(2);
   10086 
   10087 	if (wm_nvm_ready_spi(sc))
   10088 		return 1;
   10089 
   10090 	/* Toggle CS to flush commands. */
   10091 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10092 	CSR_WRITE_FLUSH(sc);
   10093 	delay(2);
   10094 	CSR_WRITE(sc, WMREG_EECD, reg);
   10095 	CSR_WRITE_FLUSH(sc);
   10096 	delay(2);
   10097 
   10098 	opc = SPI_OPC_READ;
   10099 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10100 		opc |= SPI_OPC_A8;
   10101 
   10102 	wm_eeprom_sendbits(sc, opc, 8);
   10103 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10104 
   10105 	for (i = 0; i < wordcnt; i++) {
   10106 		wm_eeprom_recvbits(sc, &val, 16);
   10107 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10108 	}
   10109 
   10110 	/* Raise CS and clear SK. */
   10111 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10112 	CSR_WRITE(sc, WMREG_EECD, reg);
   10113 	CSR_WRITE_FLUSH(sc);
   10114 	delay(2);
   10115 
   10116 	return 0;
   10117 }
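
/*
 * Note on the byte swap above: wm_eeprom_recvbits() leaves the first
 * byte shifted in from the part in the upper half of val.  NVM words
 * are stored least significant byte first, so the halves are swapped,
 * e.g. a raw val of 0xaabb is stored as data 0xbbaa.
 */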
   10118 
    10119 /* Reading via the EERD register */
   10120 
   10121 static int
   10122 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10123 {
   10124 	uint32_t attempts = 100000;
   10125 	uint32_t i, reg = 0;
   10126 	int32_t done = -1;
   10127 
   10128 	for (i = 0; i < attempts; i++) {
   10129 		reg = CSR_READ(sc, rw);
   10130 
   10131 		if (reg & EERD_DONE) {
   10132 			done = 0;
   10133 			break;
   10134 		}
   10135 		delay(5);
   10136 	}
   10137 
   10138 	return done;
   10139 }
   10140 
   10141 static int
   10142 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10143     uint16_t *data)
   10144 {
   10145 	int i, eerd = 0;
   10146 	int error = 0;
   10147 
   10148 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10149 		device_xname(sc->sc_dev), __func__));
   10150 
   10151 	for (i = 0; i < wordcnt; i++) {
   10152 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10153 
   10154 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10155 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10156 		if (error != 0)
   10157 			break;
   10158 
   10159 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10160 	}
   10161 
   10162 	return error;
   10163 }
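
/*
 * One EERD transaction, as performed above, reduces to the following
 * sketch: write the word address together with the START bit, poll
 * DONE, then take the data from the top half of the register.
 *
 *	CSR_WRITE(sc, WMREG_EERD, (addr << EERD_ADDR_SHIFT) | EERD_START);
 *	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
 *		delay(5);
 *	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 */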
   10164 
   10165 /* Flash */
   10166 
   10167 static int
   10168 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10169 {
   10170 	uint32_t eecd;
   10171 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10172 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10173 	uint8_t sig_byte = 0;
   10174 
   10175 	switch (sc->sc_type) {
   10176 	case WM_T_PCH_SPT:
   10177 		/*
   10178 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10179 		 * sector valid bits from the NVM.
   10180 		 */
   10181 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10182 		if ((*bank == 0) || (*bank == 1)) {
    10183 			aprint_error_dev(sc->sc_dev,
    10184 			    "%s: no valid NVM bank present\n", __func__);
   10186 			return -1;
   10187 		} else {
   10188 			*bank = *bank - 2;
   10189 			return 0;
   10190 		}
   10191 	case WM_T_ICH8:
   10192 	case WM_T_ICH9:
   10193 		eecd = CSR_READ(sc, WMREG_EECD);
   10194 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10195 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10196 			return 0;
   10197 		}
   10198 		/* FALLTHROUGH */
   10199 	default:
   10200 		/* Default to 0 */
   10201 		*bank = 0;
   10202 
   10203 		/* Check bank 0 */
   10204 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10205 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10206 			*bank = 0;
   10207 			return 0;
   10208 		}
   10209 
   10210 		/* Check bank 1 */
   10211 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10212 		    &sig_byte);
   10213 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10214 			*bank = 1;
   10215 			return 0;
   10216 		}
   10217 	}
   10218 
   10219 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10220 		device_xname(sc->sc_dev)));
   10221 	return -1;
   10222 }
   10223 
   10224 /******************************************************************************
   10225  * This function does initial flash setup so that a new read/write/erase cycle
   10226  * can be started.
   10227  *
   10228  * sc - The pointer to the hw structure
   10229  ****************************************************************************/
   10230 static int32_t
   10231 wm_ich8_cycle_init(struct wm_softc *sc)
   10232 {
   10233 	uint16_t hsfsts;
   10234 	int32_t error = 1;
   10235 	int32_t i     = 0;
   10236 
   10237 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10238 
    10239 	/* Check the Flash Descriptor Valid bit in HW status */
   10240 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10241 		return error;
   10242 	}
   10243 
    10244 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   10246 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10247 
   10248 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10249 
    10250 	/*
    10251 	 * Either we should have a hardware SPI cycle-in-progress bit to
    10252 	 * check against in order to start a new cycle, or the FDONE bit
    10253 	 * should be changed in the hardware so that it is 1 after hardware
    10254 	 * reset, which could then be used to tell whether a cycle is in
    10255 	 * progress or has been completed.  We should also have a software
    10256 	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit
    10257 	 * so that accesses to those bits by two threads are serialized,
    10258 	 * and so that two threads don't start a cycle at the same time.
    10259 	 */
   10260 
   10261 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10262 		/*
   10263 		 * There is no cycle running at present, so we can start a
   10264 		 * cycle
   10265 		 */
   10266 
   10267 		/* Begin by setting Flash Cycle Done. */
   10268 		hsfsts |= HSFSTS_DONE;
   10269 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10270 		error = 0;
   10271 	} else {
   10272 		/*
    10273 		 * Otherwise poll for some time so the current cycle has a
    10274 		 * chance to end before giving up.
   10275 		 */
   10276 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10277 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10278 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10279 				error = 0;
   10280 				break;
   10281 			}
   10282 			delay(1);
   10283 		}
   10284 		if (error == 0) {
   10285 			/*
    10286 			 * The previous cycle ended within the timeout, so
    10287 			 * now set the Flash Cycle Done bit.
   10288 			 */
   10289 			hsfsts |= HSFSTS_DONE;
   10290 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10291 		}
   10292 	}
   10293 	return error;
   10294 }
   10295 
   10296 /******************************************************************************
   10297  * This function starts a flash cycle and waits for its completion
   10298  *
   10299  * sc - The pointer to the hw structure
   10300  ****************************************************************************/
   10301 static int32_t
   10302 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10303 {
   10304 	uint16_t hsflctl;
   10305 	uint16_t hsfsts;
   10306 	int32_t error = 1;
   10307 	uint32_t i = 0;
   10308 
   10309 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10310 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10311 	hsflctl |= HSFCTL_GO;
   10312 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10313 
    10314 	/* Wait until the FDONE bit is set */
   10315 	do {
   10316 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10317 		if (hsfsts & HSFSTS_DONE)
   10318 			break;
   10319 		delay(1);
   10320 		i++;
   10321 	} while (i < timeout);
    10322 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10323 		error = 0;
   10324 
   10325 	return error;
   10326 }
   10327 
   10328 /******************************************************************************
   10329  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10330  *
   10331  * sc - The pointer to the hw structure
   10332  * index - The index of the byte or word to read.
   10333  * size - Size of data to read, 1=byte 2=word, 4=dword
   10334  * data - Pointer to the word to store the value read.
   10335  *****************************************************************************/
   10336 static int32_t
   10337 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10338     uint32_t size, uint32_t *data)
   10339 {
   10340 	uint16_t hsfsts;
   10341 	uint16_t hsflctl;
   10342 	uint32_t flash_linear_address;
   10343 	uint32_t flash_data = 0;
   10344 	int32_t error = 1;
   10345 	int32_t count = 0;
   10346 
    10347 	if (size < 1 || size > 4 || data == NULL ||
   10348 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10349 		return error;
   10350 
   10351 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10352 	    sc->sc_ich8_flash_base;
   10353 
   10354 	do {
   10355 		delay(1);
   10356 		/* Steps */
   10357 		error = wm_ich8_cycle_init(sc);
   10358 		if (error)
   10359 			break;
   10360 
   10361 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10362 		/* The BCOUNT field holds the byte count minus one. */
   10363 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10364 		    & HSFCTL_BCOUNT_MASK;
   10365 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10366 		if (sc->sc_type == WM_T_PCH_SPT) {
   10367 			/*
    10368 			 * In SPT, this register is in LAN memory space, not
    10369 			 * flash.  Therefore, only 32-bit access is supported.
   10370 			 */
   10371 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10372 			    (uint32_t)hsflctl);
   10373 		} else
   10374 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10375 
   10376 		/*
   10377 		 * Write the last 24 bits of index into Flash Linear address
   10378 		 * field in Flash Address
   10379 		 */
   10380 		/* TODO: TBD maybe check the index against the size of flash */
   10381 
   10382 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10383 
   10384 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10385 
    10386 		/*
    10387 		 * If FCERR is set, clear it and retry the whole sequence a
    10388 		 * few more times; otherwise read the value out of Flash
    10389 		 * Data0, least significant byte first.
    10390 		 */
   10392 		if (error == 0) {
   10393 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10394 			if (size == 1)
   10395 				*data = (uint8_t)(flash_data & 0x000000FF);
   10396 			else if (size == 2)
   10397 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10398 			else if (size == 4)
   10399 				*data = (uint32_t)flash_data;
   10400 			break;
   10401 		} else {
   10402 			/*
   10403 			 * If we've gotten here, then things are probably
   10404 			 * completely hosed, but if the error condition is
   10405 			 * detected, it won't hurt to give it another try...
   10406 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10407 			 */
   10408 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10409 			if (hsfsts & HSFSTS_ERR) {
   10410 				/* Repeat for some time before giving up. */
   10411 				continue;
   10412 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10413 				break;
   10414 		}
   10415 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10416 
   10417 	return error;
   10418 }
   10419 
   10420 /******************************************************************************
   10421  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10422  *
   10423  * sc - pointer to wm_hw structure
   10424  * index - The index of the byte to read.
   10425  * data - Pointer to a byte to store the value read.
   10426  *****************************************************************************/
   10427 static int32_t
    10428 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
   10429 {
   10430 	int32_t status;
   10431 	uint32_t word = 0;
   10432 
   10433 	status = wm_read_ich8_data(sc, index, 1, &word);
   10434 	if (status == 0)
   10435 		*data = (uint8_t)word;
   10436 	else
   10437 		*data = 0;
   10438 
   10439 	return status;
   10440 }
   10441 
   10442 /******************************************************************************
   10443  * Reads a word from the NVM using the ICH8 flash access registers.
   10444  *
   10445  * sc - pointer to wm_hw structure
   10446  * index - The starting byte index of the word to read.
   10447  * data - Pointer to a word to store the value read.
   10448  *****************************************************************************/
   10449 static int32_t
   10450 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10451 {
   10452 	int32_t status;
   10453 	uint32_t word = 0;
   10454 
   10455 	status = wm_read_ich8_data(sc, index, 2, &word);
   10456 	if (status == 0)
   10457 		*data = (uint16_t)word;
   10458 	else
   10459 		*data = 0;
   10460 
   10461 	return status;
   10462 }
   10463 
   10464 /******************************************************************************
   10465  * Reads a dword from the NVM using the ICH8 flash access registers.
   10466  *
   10467  * sc - pointer to wm_hw structure
    10468  * index - The starting byte index of the dword to read.
    10469  * data - Pointer to a dword to store the value read.
   10470  *****************************************************************************/
   10471 static int32_t
   10472 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10473 {
   10474 	int32_t status;
   10475 
   10476 	status = wm_read_ich8_data(sc, index, 4, data);
   10477 	return status;
   10478 }
   10479 
   10480 /******************************************************************************
   10481  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10482  * register.
   10483  *
   10484  * sc - Struct containing variables accessed by shared code
   10485  * offset - offset of word in the EEPROM to read
   10486  * data - word read from the EEPROM
   10487  * words - number of words to read
   10488  *****************************************************************************/
   10489 static int
   10490 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10491 {
   10492 	int32_t  error = 0;
   10493 	uint32_t flash_bank = 0;
   10494 	uint32_t act_offset = 0;
   10495 	uint32_t bank_offset = 0;
   10496 	uint16_t word = 0;
   10497 	uint16_t i = 0;
   10498 
   10499 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10500 		device_xname(sc->sc_dev), __func__));
   10501 
   10502 	/*
   10503 	 * We need to know which is the valid flash bank.  In the event
   10504 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10505 	 * managing flash_bank.  So it cannot be trusted and needs
   10506 	 * to be updated with each read.
   10507 	 */
   10508 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10509 	if (error) {
   10510 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10511 			device_xname(sc->sc_dev)));
   10512 		flash_bank = 0;
   10513 	}
   10514 
   10515 	/*
   10516 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10517 	 * size
   10518 	 */
   10519 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10520 
   10521 	error = wm_get_swfwhw_semaphore(sc);
   10522 	if (error) {
   10523 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10524 		    __func__);
   10525 		return error;
   10526 	}
   10527 
   10528 	for (i = 0; i < words; i++) {
   10529 		/* The NVM part needs a byte offset, hence * 2 */
   10530 		act_offset = bank_offset + ((offset + i) * 2);
   10531 		error = wm_read_ich8_word(sc, act_offset, &word);
   10532 		if (error) {
   10533 			aprint_error_dev(sc->sc_dev,
   10534 			    "%s: failed to read NVM\n", __func__);
   10535 			break;
   10536 		}
   10537 		data[i] = word;
   10538 	}
   10539 
   10540 	wm_put_swfwhw_semaphore(sc);
   10541 	return error;
   10542 }
   10543 
   10544 /******************************************************************************
   10545  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10546  * register.
   10547  *
   10548  * sc - Struct containing variables accessed by shared code
   10549  * offset - offset of word in the EEPROM to read
   10550  * data - word read from the EEPROM
   10551  * words - number of words to read
   10552  *****************************************************************************/
   10553 static int
   10554 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10555 {
   10556 	int32_t  error = 0;
   10557 	uint32_t flash_bank = 0;
   10558 	uint32_t act_offset = 0;
   10559 	uint32_t bank_offset = 0;
   10560 	uint32_t dword = 0;
   10561 	uint16_t i = 0;
   10562 
   10563 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10564 		device_xname(sc->sc_dev), __func__));
   10565 
   10566 	/*
   10567 	 * We need to know which is the valid flash bank.  In the event
   10568 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10569 	 * managing flash_bank.  So it cannot be trusted and needs
   10570 	 * to be updated with each read.
   10571 	 */
   10572 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10573 	if (error) {
   10574 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10575 			device_xname(sc->sc_dev)));
   10576 		flash_bank = 0;
   10577 	}
   10578 
   10579 	/*
   10580 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10581 	 * size
   10582 	 */
   10583 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10584 
   10585 	error = wm_get_swfwhw_semaphore(sc);
   10586 	if (error) {
   10587 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10588 		    __func__);
   10589 		return error;
   10590 	}
   10591 
   10592 	for (i = 0; i < words; i++) {
   10593 		/* The NVM part needs a byte offset, hence * 2 */
   10594 		act_offset = bank_offset + ((offset + i) * 2);
   10595 		/* but we must read dword aligned, so mask ... */
   10596 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10597 		if (error) {
   10598 			aprint_error_dev(sc->sc_dev,
   10599 			    "%s: failed to read NVM\n", __func__);
   10600 			break;
   10601 		}
   10602 		/* ... and pick out low or high word */
   10603 		if ((act_offset & 0x2) == 0)
   10604 			data[i] = (uint16_t)(dword & 0xFFFF);
   10605 		else
   10606 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10607 	}
   10608 
   10609 	wm_put_swfwhw_semaphore(sc);
   10610 	return error;
   10611 }
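
/*
 * Worked example of the alignment logic above: for NVM word offset 3
 * in bank 0, act_offset is 6; the dword at byte offset 4 is read and,
 * because (6 & 0x2) != 0, the high 16 bits are returned.
 */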
   10612 
   10613 /* iNVM */
   10614 
   10615 static int
   10616 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10617 {
    10618 	int32_t  rv = -1;	/* fail unless the word is found */
   10619 	uint32_t invm_dword;
   10620 	uint16_t i;
   10621 	uint8_t record_type, word_address;
   10622 
   10623 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10624 		device_xname(sc->sc_dev), __func__));
   10625 
   10626 	for (i = 0; i < INVM_SIZE; i++) {
   10627 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10628 		/* Get record type */
   10629 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10630 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10631 			break;
   10632 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10633 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10634 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10635 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10636 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10637 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10638 			if (word_address == address) {
   10639 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10640 				rv = 0;
   10641 				break;
   10642 			}
   10643 		}
   10644 	}
   10645 
   10646 	return rv;
   10647 }
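
/*
 * The iNVM is a flat array of 32-bit records.  A word-autoload record
 * carries both the NVM word address and its 16-bit data, so a lookup
 * is the linear scan above; CSR autoload and RSA key records are
 * skipped by advancing i past their payload dwords.
 */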
   10648 
   10649 static int
   10650 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10651 {
   10652 	int rv = 0;
   10653 	int i;
   10654 
   10655 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10656 		device_xname(sc->sc_dev), __func__));
   10657 
   10658 	for (i = 0; i < words; i++) {
   10659 		switch (offset + i) {
   10660 		case NVM_OFF_MACADDR:
   10661 		case NVM_OFF_MACADDR1:
   10662 		case NVM_OFF_MACADDR2:
   10663 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10664 			if (rv != 0) {
   10665 				data[i] = 0xffff;
   10666 				rv = -1;
   10667 			}
   10668 			break;
   10669 		case NVM_OFF_CFG2:
   10670 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10671 			if (rv != 0) {
   10672 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10673 				rv = 0;
   10674 			}
   10675 			break;
   10676 		case NVM_OFF_CFG4:
   10677 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10678 			if (rv != 0) {
   10679 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10680 				rv = 0;
   10681 			}
   10682 			break;
   10683 		case NVM_OFF_LED_1_CFG:
   10684 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10685 			if (rv != 0) {
   10686 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10687 				rv = 0;
   10688 			}
   10689 			break;
   10690 		case NVM_OFF_LED_0_2_CFG:
   10691 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10692 			if (rv != 0) {
   10693 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10694 				rv = 0;
   10695 			}
   10696 			break;
   10697 		case NVM_OFF_ID_LED_SETTINGS:
   10698 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10699 			if (rv != 0) {
   10700 				*data = ID_LED_RESERVED_FFFF;
   10701 				rv = 0;
   10702 			}
   10703 			break;
   10704 		default:
   10705 			DPRINTF(WM_DEBUG_NVM,
   10706 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10707 			*data = NVM_RESERVED_WORD;
   10708 			break;
   10709 		}
   10710 	}
   10711 
   10712 	return rv;
   10713 }
   10714 
    10715 /* Locking, NVM type detection, checksum validation, version check and read */
   10716 
   10717 /*
   10718  * wm_nvm_acquire:
   10719  *
   10720  *	Perform the EEPROM handshake required on some chips.
   10721  */
   10722 static int
   10723 wm_nvm_acquire(struct wm_softc *sc)
   10724 {
   10725 	uint32_t reg;
   10726 	int x;
   10727 	int ret = 0;
   10728 
   10729 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10730 		device_xname(sc->sc_dev), __func__));
   10731 
    10732 	/* Flash types need no handshake here; always succeed */
   10733 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10734 		return 0;
   10735 
   10736 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10737 		ret = wm_get_swfwhw_semaphore(sc);
   10738 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10739 		/* This will also do wm_get_swsm_semaphore() if needed */
   10740 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10741 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10742 		ret = wm_get_swsm_semaphore(sc);
   10743 	}
   10744 
   10745 	if (ret) {
   10746 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10747 			__func__);
   10748 		return 1;
   10749 	}
   10750 
   10751 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10752 		reg = CSR_READ(sc, WMREG_EECD);
   10753 
   10754 		/* Request EEPROM access. */
   10755 		reg |= EECD_EE_REQ;
   10756 		CSR_WRITE(sc, WMREG_EECD, reg);
   10757 
   10758 		/* ..and wait for it to be granted. */
   10759 		for (x = 0; x < 1000; x++) {
   10760 			reg = CSR_READ(sc, WMREG_EECD);
   10761 			if (reg & EECD_EE_GNT)
   10762 				break;
   10763 			delay(5);
   10764 		}
   10765 		if ((reg & EECD_EE_GNT) == 0) {
   10766 			aprint_error_dev(sc->sc_dev,
   10767 			    "could not acquire EEPROM GNT\n");
   10768 			reg &= ~EECD_EE_REQ;
   10769 			CSR_WRITE(sc, WMREG_EECD, reg);
   10770 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10771 				wm_put_swfwhw_semaphore(sc);
   10772 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10773 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10774 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10775 				wm_put_swsm_semaphore(sc);
   10776 			return 1;
   10777 		}
   10778 	}
   10779 
   10780 	return 0;
   10781 }
   10782 
   10783 /*
   10784  * wm_nvm_release:
   10785  *
   10786  *	Release the EEPROM mutex.
   10787  */
   10788 static void
   10789 wm_nvm_release(struct wm_softc *sc)
   10790 {
   10791 	uint32_t reg;
   10792 
   10793 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10794 		device_xname(sc->sc_dev), __func__));
   10795 
    10796 	/* Nothing to release for flash types */
   10797 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10798 		return;
   10799 
   10800 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10801 		reg = CSR_READ(sc, WMREG_EECD);
   10802 		reg &= ~EECD_EE_REQ;
   10803 		CSR_WRITE(sc, WMREG_EECD, reg);
   10804 	}
   10805 
   10806 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10807 		wm_put_swfwhw_semaphore(sc);
   10808 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10809 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10810 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10811 		wm_put_swsm_semaphore(sc);
   10812 }
   10813 
   10814 static int
   10815 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10816 {
   10817 	uint32_t eecd = 0;
   10818 
   10819 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10820 	    || sc->sc_type == WM_T_82583) {
   10821 		eecd = CSR_READ(sc, WMREG_EECD);
   10822 
   10823 		/* Isolate bits 15 & 16 */
   10824 		eecd = ((eecd >> 15) & 0x03);
   10825 
   10826 		/* If both bits are set, device is Flash type */
   10827 		if (eecd == 0x03)
   10828 			return 0;
   10829 	}
   10830 	return 1;
   10831 }
   10832 
   10833 static int
   10834 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10835 {
   10836 	uint32_t eec;
   10837 
   10838 	eec = CSR_READ(sc, WMREG_EEC);
   10839 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10840 		return 1;
   10841 
   10842 	return 0;
   10843 }
   10844 
   10845 /*
   10846  * wm_nvm_validate_checksum
   10847  *
   10848  * The checksum is defined as the sum of the first 64 (16 bit) words.
   10849  */
   10850 static int
   10851 wm_nvm_validate_checksum(struct wm_softc *sc)
   10852 {
   10853 	uint16_t checksum;
   10854 	uint16_t eeprom_data;
   10855 #ifdef WM_DEBUG
   10856 	uint16_t csum_wordaddr, valid_checksum;
   10857 #endif
   10858 	int i;
   10859 
   10860 	checksum = 0;
   10861 
   10862 	/* Don't check for I211 */
   10863 	if (sc->sc_type == WM_T_I211)
   10864 		return 0;
   10865 
   10866 #ifdef WM_DEBUG
   10867 	if (sc->sc_type == WM_T_PCH_LPT) {
   10868 		csum_wordaddr = NVM_OFF_COMPAT;
   10869 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10870 	} else {
   10871 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10872 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10873 	}
   10874 
   10875 	/* Dump EEPROM image for debug */
   10876 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10877 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10878 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10879 		/* XXX PCH_SPT? */
   10880 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10881 		if ((eeprom_data & valid_checksum) == 0) {
   10882 			DPRINTF(WM_DEBUG_NVM,
    10883 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   10884 				device_xname(sc->sc_dev), eeprom_data,
   10885 				    valid_checksum));
   10886 		}
   10887 	}
   10888 
   10889 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10890 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10891 		for (i = 0; i < NVM_SIZE; i++) {
   10892 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10893 				printf("XXXX ");
   10894 			else
   10895 				printf("%04hx ", eeprom_data);
   10896 			if (i % 8 == 7)
   10897 				printf("\n");
   10898 		}
   10899 	}
   10900 
   10901 #endif /* WM_DEBUG */
   10902 
   10903 	for (i = 0; i < NVM_SIZE; i++) {
   10904 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10905 			return 1;
   10906 		checksum += eeprom_data;
   10907 	}
   10908 
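	/*
	 * Note that a mismatch is deliberately not treated as fatal:
	 * it is only reported when the driver is built with WM_DEBUG.
	 */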
   10909 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10910 #ifdef WM_DEBUG
   10911 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10912 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10913 #endif
   10914 	}
   10915 
   10916 	return 0;
   10917 }
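
/*
 * Checksum arithmetic example: the 16-bit sum of words 0x00-0x3f must
 * equal NVM_CHECKSUM, so a tool preparing an image would store, in the
 * last word, NVM_CHECKSUM minus the 16-bit sum of the preceding 63
 * words (modulo 0x10000).
 */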
   10918 
   10919 static void
   10920 wm_nvm_version_invm(struct wm_softc *sc)
   10921 {
   10922 	uint32_t dword;
   10923 
   10924 	/*
    10925 	 * Linux's code to decode the version is very strange, so we don't
    10926 	 * follow that algorithm and just use word 61 as the documentation
    10927 	 * describes.  Perhaps it's not perfect though...
   10928 	 *
   10929 	 * Example:
   10930 	 *
   10931 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10932 	 */
   10933 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10934 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10935 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10936 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10937 }
   10938 
   10939 static void
   10940 wm_nvm_version(struct wm_softc *sc)
   10941 {
   10942 	uint16_t major, minor, build, patch;
   10943 	uint16_t uid0, uid1;
   10944 	uint16_t nvm_data;
   10945 	uint16_t off;
   10946 	bool check_version = false;
   10947 	bool check_optionrom = false;
   10948 	bool have_build = false;
   10949 
   10950 	/*
   10951 	 * Version format:
   10952 	 *
   10953 	 * XYYZ
   10954 	 * X0YZ
   10955 	 * X0YY
   10956 	 *
   10957 	 * Example:
   10958 	 *
   10959 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10960 	 *	82571	0x50a6	5.10.6?
   10961 	 *	82572	0x506a	5.6.10?
   10962 	 *	82572EI	0x5069	5.6.9?
   10963 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10964 	 *		0x2013	2.1.3?
    10965 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10966 	 */
   10967 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10968 	switch (sc->sc_type) {
   10969 	case WM_T_82571:
   10970 	case WM_T_82572:
   10971 	case WM_T_82574:
   10972 	case WM_T_82583:
   10973 		check_version = true;
   10974 		check_optionrom = true;
   10975 		have_build = true;
   10976 		break;
   10977 	case WM_T_82575:
   10978 	case WM_T_82576:
   10979 	case WM_T_82580:
   10980 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10981 			check_version = true;
   10982 		break;
   10983 	case WM_T_I211:
   10984 		wm_nvm_version_invm(sc);
   10985 		goto printver;
   10986 	case WM_T_I210:
   10987 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10988 			wm_nvm_version_invm(sc);
   10989 			goto printver;
   10990 		}
   10991 		/* FALLTHROUGH */
   10992 	case WM_T_I350:
   10993 	case WM_T_I354:
   10994 		check_version = true;
   10995 		check_optionrom = true;
   10996 		break;
   10997 	default:
   10998 		return;
   10999 	}
   11000 	if (check_version) {
   11001 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11002 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11003 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11004 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11005 			build = nvm_data & NVM_BUILD_MASK;
   11006 			have_build = true;
   11007 		} else
   11008 			minor = nvm_data & 0x00ff;
   11009 
    11010 		/* Convert the BCD-encoded minor (e.g. 0x10) to decimal (10) */
   11011 		minor = (minor / 16) * 10 + (minor % 16);
   11012 		sc->sc_nvm_ver_major = major;
   11013 		sc->sc_nvm_ver_minor = minor;
   11014 
   11015 printver:
   11016 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11017 		    sc->sc_nvm_ver_minor);
   11018 		if (have_build) {
   11019 			sc->sc_nvm_ver_build = build;
   11020 			aprint_verbose(".%d", build);
   11021 		}
   11022 	}
   11023 	if (check_optionrom) {
   11024 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11025 		/* Option ROM Version */
   11026 		if ((off != 0x0000) && (off != 0xffff)) {
   11027 			off += NVM_COMBO_VER_OFF;
   11028 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11029 			wm_nvm_read(sc, off, 1, &uid0);
   11030 			if ((uid0 != 0) && (uid0 != 0xffff)
   11031 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11032 				/* 16bits */
   11033 				major = uid0 >> 8;
   11034 				build = (uid0 << 8) | (uid1 >> 8);
   11035 				patch = uid1 & 0x00ff;
   11036 				aprint_verbose(", option ROM Version %d.%d.%d",
   11037 				    major, build, patch);
   11038 			}
   11039 		}
   11040 	}
   11041 
   11042 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11043 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11044 }
   11045 
   11046 /*
   11047  * wm_nvm_read:
   11048  *
   11049  *	Read data from the serial EEPROM.
   11050  */
   11051 static int
   11052 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11053 {
   11054 	int rv;
   11055 
   11056 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11057 		device_xname(sc->sc_dev), __func__));
   11058 
   11059 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11060 		return 1;
   11061 
   11062 	if (wm_nvm_acquire(sc))
   11063 		return 1;
   11064 
   11065 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11066 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11067 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11068 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11069 	else if (sc->sc_type == WM_T_PCH_SPT)
   11070 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11071 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11072 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11073 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11074 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11075 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11076 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11077 	else
   11078 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11079 
   11080 	wm_nvm_release(sc);
   11081 	return rv;
   11082 }
   11083 
   11084 /*
   11085  * Hardware semaphores.
    11086  * Very complex...
   11087  */
   11088 
   11089 static int
   11090 wm_get_swsm_semaphore(struct wm_softc *sc)
   11091 {
   11092 	int32_t timeout;
   11093 	uint32_t swsm;
   11094 
   11095 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11096 		device_xname(sc->sc_dev), __func__));
   11097 
   11098 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11099 		/* Get the SW semaphore. */
   11100 		timeout = sc->sc_nvm_wordsize + 1;
   11101 		while (timeout) {
   11102 			swsm = CSR_READ(sc, WMREG_SWSM);
   11103 
   11104 			if ((swsm & SWSM_SMBI) == 0)
   11105 				break;
   11106 
   11107 			delay(50);
   11108 			timeout--;
   11109 		}
   11110 
   11111 		if (timeout == 0) {
   11112 			aprint_error_dev(sc->sc_dev,
   11113 			    "could not acquire SWSM SMBI\n");
   11114 			return 1;
   11115 		}
   11116 	}
   11117 
   11118 	/* Get the FW semaphore. */
   11119 	timeout = sc->sc_nvm_wordsize + 1;
   11120 	while (timeout) {
   11121 		swsm = CSR_READ(sc, WMREG_SWSM);
   11122 		swsm |= SWSM_SWESMBI;
   11123 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11124 		/* If we managed to set the bit we got the semaphore. */
   11125 		swsm = CSR_READ(sc, WMREG_SWSM);
   11126 		if (swsm & SWSM_SWESMBI)
   11127 			break;
   11128 
   11129 		delay(50);
   11130 		timeout--;
   11131 	}
   11132 
   11133 	if (timeout == 0) {
   11134 		aprint_error_dev(sc->sc_dev,
   11135 		    "could not acquire SWSM SWESMBI\n");
   11136 		/* Release semaphores */
   11137 		wm_put_swsm_semaphore(sc);
   11138 		return 1;
   11139 	}
   11140 	return 0;
   11141 }
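
/*
 * The handshake above is two-staged: SMBI serializes software agents
 * (spin until the bit reads back clear), then SWESMBI arbitrates
 * software against firmware (set the bit and read it back; we own the
 * semaphore only if it sticks).  wm_put_swsm_semaphore() clears both.
 */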
   11142 
   11143 /*
   11144  * Put hardware semaphore.
   11145  * Same as e1000_put_hw_semaphore_generic()
   11146  */
   11147 static void
   11148 wm_put_swsm_semaphore(struct wm_softc *sc)
   11149 {
   11150 	uint32_t swsm;
   11151 
   11152 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11153 		device_xname(sc->sc_dev), __func__));
   11154 
   11155 	swsm = CSR_READ(sc, WMREG_SWSM);
   11156 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11157 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11158 }
   11159 
   11160 /*
   11161  * Get SW/FW semaphore.
   11162  * Same as e1000_acquire_swfw_sync_82575().
   11163  */
   11164 static int
   11165 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11166 {
   11167 	uint32_t swfw_sync;
   11168 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11169 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    11170 	int timeout;
   11171 
   11172 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11173 		device_xname(sc->sc_dev), __func__));
   11174 
   11175 	for (timeout = 0; timeout < 200; timeout++) {
   11176 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11177 			if (wm_get_swsm_semaphore(sc)) {
   11178 				aprint_error_dev(sc->sc_dev,
   11179 				    "%s: failed to get semaphore\n",
   11180 				    __func__);
   11181 				return 1;
   11182 			}
   11183 		}
   11184 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11185 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11186 			swfw_sync |= swmask;
   11187 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11188 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11189 				wm_put_swsm_semaphore(sc);
   11190 			return 0;
   11191 		}
   11192 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11193 			wm_put_swsm_semaphore(sc);
   11194 		delay(5000);
   11195 	}
   11196 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11197 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11198 	return 1;
   11199 }
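
/*
 * Illustrative usage (comment only): protecting a shared resource with
 * the SW/FW semaphore, as the NVM code does with SWFW_EEP_SM.  The
 * SWFW_PHY0_SM mask here is just an example resource:
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) == 0) {
 *		... access PHY 0 registers ...
 *		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
 *	}
 */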
   11200 
   11201 static void
   11202 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11203 {
   11204 	uint32_t swfw_sync;
   11205 
   11206 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11207 		device_xname(sc->sc_dev), __func__));
   11208 
   11209 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11210 		while (wm_get_swsm_semaphore(sc) != 0)
   11211 			continue;
   11212 	}
   11213 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11214 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11215 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11216 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11217 		wm_put_swsm_semaphore(sc);
   11218 }
   11219 
   11220 static int
   11221 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11222 {
   11223 	uint32_t ext_ctrl;
    11224 	int timeout;
   11225 
   11226 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11227 		device_xname(sc->sc_dev), __func__));
   11228 
   11229 	for (timeout = 0; timeout < 200; timeout++) {
   11230 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11231 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11232 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11233 
   11234 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11235 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11236 			return 0;
   11237 		delay(5000);
   11238 	}
   11239 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11240 	    device_xname(sc->sc_dev), ext_ctrl);
   11241 	return 1;
   11242 }
   11243 
   11244 static void
   11245 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11246 {
   11247 	uint32_t ext_ctrl;
   11248 
   11249 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11250 		device_xname(sc->sc_dev), __func__));
   11251 
   11252 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11253 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11254 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11255 }
   11256 
   11257 static int
   11258 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11259 {
   11260 	int i = 0;
   11261 	uint32_t reg;
   11262 
   11263 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11264 		device_xname(sc->sc_dev), __func__));
   11265 
   11266 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11267 	do {
   11268 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11269 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11270 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11271 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11272 			break;
   11273 		delay(2*1000);
   11274 		i++;
   11275 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11276 
   11277 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11278 		wm_put_hw_semaphore_82573(sc);
   11279 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11280 		    device_xname(sc->sc_dev));
   11281 		return -1;
   11282 	}
   11283 
   11284 	return 0;
   11285 }
   11286 
   11287 static void
   11288 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11289 {
   11290 	uint32_t reg;
   11291 
   11292 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11293 		device_xname(sc->sc_dev), __func__));
   11294 
   11295 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11296 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11297 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11298 }
   11299 
   11300 /*
   11301  * Management mode and power management related subroutines.
   11302  * BMC, AMT, suspend/resume and EEE.
   11303  */
   11304 
   11305 #ifdef WM_WOL
   11306 static int
   11307 wm_check_mng_mode(struct wm_softc *sc)
   11308 {
   11309 	int rv;
   11310 
   11311 	switch (sc->sc_type) {
   11312 	case WM_T_ICH8:
   11313 	case WM_T_ICH9:
   11314 	case WM_T_ICH10:
   11315 	case WM_T_PCH:
   11316 	case WM_T_PCH2:
   11317 	case WM_T_PCH_LPT:
   11318 	case WM_T_PCH_SPT:
   11319 		rv = wm_check_mng_mode_ich8lan(sc);
   11320 		break;
   11321 	case WM_T_82574:
   11322 	case WM_T_82583:
   11323 		rv = wm_check_mng_mode_82574(sc);
   11324 		break;
   11325 	case WM_T_82571:
   11326 	case WM_T_82572:
   11327 	case WM_T_82573:
   11328 	case WM_T_80003:
   11329 		rv = wm_check_mng_mode_generic(sc);
   11330 		break;
   11331 	default:
    11332 		/* nothing to do */
   11333 		rv = 0;
   11334 		break;
   11335 	}
   11336 
   11337 	return rv;
   11338 }
   11339 
   11340 static int
   11341 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11342 {
   11343 	uint32_t fwsm;
   11344 
   11345 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11346 
   11347 	if (((fwsm & FWSM_FW_VALID) != 0)
   11348 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11349 		return 1;
   11350 
   11351 	return 0;
   11352 }
   11353 
   11354 static int
   11355 wm_check_mng_mode_82574(struct wm_softc *sc)
   11356 {
   11357 	uint16_t data;
   11358 
   11359 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11360 
   11361 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11362 		return 1;
   11363 
   11364 	return 0;
   11365 }
   11366 
   11367 static int
   11368 wm_check_mng_mode_generic(struct wm_softc *sc)
   11369 {
   11370 	uint32_t fwsm;
   11371 
   11372 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11373 
   11374 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11375 		return 1;
   11376 
   11377 	return 0;
   11378 }
   11379 #endif /* WM_WOL */
   11380 
   11381 static int
   11382 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11383 {
   11384 	uint32_t manc, fwsm, factps;
   11385 
   11386 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11387 		return 0;
   11388 
   11389 	manc = CSR_READ(sc, WMREG_MANC);
   11390 
   11391 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11392 		device_xname(sc->sc_dev), manc));
   11393 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11394 		return 0;
   11395 
   11396 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11397 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11398 		factps = CSR_READ(sc, WMREG_FACTPS);
   11399 		if (((factps & FACTPS_MNGCG) == 0)
   11400 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11401 			return 1;
    11402 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11403 		uint16_t data;
   11404 
   11405 		factps = CSR_READ(sc, WMREG_FACTPS);
   11406 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11407 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11408 			device_xname(sc->sc_dev), factps, data));
   11409 		if (((factps & FACTPS_MNGCG) == 0)
   11410 		    && ((data & NVM_CFG2_MNGM_MASK)
   11411 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11412 			return 1;
   11413 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11414 	    && ((manc & MANC_ASF_EN) == 0))
   11415 		return 1;
   11416 
   11417 	return 0;
   11418 }
   11419 
   11420 static bool
   11421 wm_phy_resetisblocked(struct wm_softc *sc)
   11422 {
   11423 	bool blocked = false;
   11424 	uint32_t reg;
   11425 	int i = 0;
   11426 
   11427 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11428 		device_xname(sc->sc_dev), __func__));
   11429 
   11430 	switch (sc->sc_type) {
   11431 	case WM_T_ICH8:
   11432 	case WM_T_ICH9:
   11433 	case WM_T_ICH10:
   11434 	case WM_T_PCH:
   11435 	case WM_T_PCH2:
   11436 	case WM_T_PCH_LPT:
   11437 	case WM_T_PCH_SPT:
   11438 		do {
   11439 			reg = CSR_READ(sc, WMREG_FWSM);
   11440 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11441 				blocked = true;
   11442 				delay(10*1000);
   11443 				continue;
   11444 			}
   11445 			blocked = false;
   11446 		} while (blocked && (i++ < 10));
    11447 		return blocked;
   11449 	case WM_T_82571:
   11450 	case WM_T_82572:
   11451 	case WM_T_82573:
   11452 	case WM_T_82574:
   11453 	case WM_T_82583:
   11454 	case WM_T_80003:
   11455 		reg = CSR_READ(sc, WMREG_MANC);
    11456 		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   11461 	default:
   11462 		/* no problem */
   11463 		break;
   11464 	}
   11465 
   11466 	return false;
   11467 }
   11468 
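/*
 * wm_get_hw_control/wm_release_hw_control: the DRV_LOAD bit (in SWSM on
 * 82573, in CTRL_EXT on the others) tells the management firmware
 * whether a host driver currently owns the hardware.
 */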
   11469 static void
   11470 wm_get_hw_control(struct wm_softc *sc)
   11471 {
   11472 	uint32_t reg;
   11473 
   11474 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11475 		device_xname(sc->sc_dev), __func__));
   11476 
   11477 	switch (sc->sc_type) {
   11478 	case WM_T_82573:
   11479 		reg = CSR_READ(sc, WMREG_SWSM);
   11480 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11481 		break;
   11482 	case WM_T_82571:
   11483 	case WM_T_82572:
   11484 	case WM_T_82574:
   11485 	case WM_T_82583:
   11486 	case WM_T_80003:
   11487 	case WM_T_ICH8:
   11488 	case WM_T_ICH9:
   11489 	case WM_T_ICH10:
   11490 	case WM_T_PCH:
   11491 	case WM_T_PCH2:
   11492 	case WM_T_PCH_LPT:
   11493 	case WM_T_PCH_SPT:
   11494 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11495 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11496 		break;
   11497 	default:
   11498 		break;
   11499 	}
   11500 }
   11501 
   11502 static void
   11503 wm_release_hw_control(struct wm_softc *sc)
   11504 {
   11505 	uint32_t reg;
   11506 
   11507 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11508 		device_xname(sc->sc_dev), __func__));
   11509 
   11510 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11511 		return;
   11512 
   11513 	if (sc->sc_type == WM_T_82573) {
   11514 		reg = CSR_READ(sc, WMREG_SWSM);
		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
   11517 	} else {
   11518 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11519 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11520 	}
   11521 }
   11522 
   11523 static void
   11524 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11525 {
   11526 	uint32_t reg;
   11527 
   11528 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11529 		device_xname(sc->sc_dev), __func__));
   11530 
   11531 	if (sc->sc_type < WM_T_PCH2)
   11532 		return;
   11533 
   11534 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11535 
   11536 	if (gate)
   11537 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11538 	else
   11539 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11540 
   11541 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11542 }
   11543 
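/*
 * Move the PHY from the SMBus interface (used by the management
 * firmware while no driver is loaded) back to PCIe/MDIO by toggling the
 * LANPHYPC value pin.  On PCH_LPT and newer, SMBus mode is explicitly
 * forced around the toggle.
 */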
   11544 static void
   11545 wm_smbustopci(struct wm_softc *sc)
   11546 {
   11547 	uint32_t fwsm, reg;
   11548 
   11549 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11550 		device_xname(sc->sc_dev), __func__));
   11551 
   11552 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11553 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11554 
   11555 	/* Acquire semaphore */
   11556 	wm_get_swfwhw_semaphore(sc);
   11557 
   11558 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11559 	if (((fwsm & FWSM_FW_VALID) == 0)
	    && !wm_phy_resetisblocked(sc)) {
   11561 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11562 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11563 			reg |= CTRL_EXT_FORCE_SMBUS;
   11564 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11565 			CSR_WRITE_FLUSH(sc);
   11566 			delay(50*1000);
   11567 		}
   11568 
   11569 		/* Toggle LANPHYPC */
   11570 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11571 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11572 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11573 		CSR_WRITE_FLUSH(sc);
   11574 		delay(1000);
   11575 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11576 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11577 		CSR_WRITE_FLUSH(sc);
   11578 		delay(50*1000);
   11579 
   11580 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11581 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11582 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11583 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11584 		}
   11585 	}
   11586 
   11587 	/* Release semaphore */
   11588 	wm_put_swfwhw_semaphore(sc);
   11589 
   11590 	/*
   11591 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11592 	 */
   11593 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11594 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11595 }
   11596 
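/*
 * Configure how received management traffic is shared with the host:
 * the hardware is kept from intercepting ARP, and packets for the
 * management ports are forwarded to the host as well.
 */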
   11597 static void
   11598 wm_init_manageability(struct wm_softc *sc)
   11599 {
   11600 
   11601 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11602 		device_xname(sc->sc_dev), __func__));
   11603 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11604 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11605 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11606 
   11607 		/* Disable hardware interception of ARP */
   11608 		manc &= ~MANC_ARP_EN;
   11609 
   11610 		/* Enable receiving management packets to the host */
   11611 		if (sc->sc_type >= WM_T_82571) {
   11612 			manc |= MANC_EN_MNG2HOST;
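			/* Forward the firmware's management ports, too */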
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11614 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11615 		}
   11616 
   11617 		CSR_WRITE(sc, WMREG_MANC, manc);
   11618 	}
   11619 }
   11620 
   11621 static void
   11622 wm_release_manageability(struct wm_softc *sc)
   11623 {
   11624 
   11625 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11626 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11627 
   11628 		manc |= MANC_ARP_EN;
   11629 		if (sc->sc_type >= WM_T_82571)
   11630 			manc &= ~MANC_EN_MNG2HOST;
   11631 
   11632 		CSR_WRITE(sc, WMREG_MANC, manc);
   11633 	}
   11634 }
   11635 
   11636 static void
   11637 wm_get_wakeup(struct wm_softc *sc)
   11638 {
   11639 
   11640 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11641 	switch (sc->sc_type) {
   11642 	case WM_T_82573:
   11643 	case WM_T_82583:
   11644 		sc->sc_flags |= WM_F_HAS_AMT;
   11645 		/* FALLTHROUGH */
   11646 	case WM_T_80003:
   11647 	case WM_T_82541:
   11648 	case WM_T_82547:
   11649 	case WM_T_82571:
   11650 	case WM_T_82572:
   11651 	case WM_T_82574:
   11652 	case WM_T_82575:
   11653 	case WM_T_82576:
   11654 	case WM_T_82580:
   11655 	case WM_T_I350:
   11656 	case WM_T_I354:
   11657 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   11658 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11659 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11660 		break;
   11661 	case WM_T_ICH8:
   11662 	case WM_T_ICH9:
   11663 	case WM_T_ICH10:
   11664 	case WM_T_PCH:
   11665 	case WM_T_PCH2:
   11666 	case WM_T_PCH_LPT:
   11667 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   11668 		sc->sc_flags |= WM_F_HAS_AMT;
   11669 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11670 		break;
   11671 	default:
   11672 		break;
   11673 	}
   11674 
   11675 	/* 1: HAS_MANAGE */
   11676 	if (wm_enable_mng_pass_thru(sc) != 0)
   11677 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11678 
   11679 #ifdef WM_DEBUG
   11680 	printf("\n");
   11681 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11682 		printf("HAS_AMT,");
   11683 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11684 		printf("ARC_SUBSYS_VALID,");
   11685 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11686 		printf("ASF_FIRMWARE_PRES,");
   11687 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11688 		printf("HAS_MANAGE,");
   11689 	printf("\n");
   11690 #endif
	/*
	 * Note that the WOL flag is set after the EEPROM has been reset.
	 */
   11695 }
   11696 
   11697 #ifdef WM_WOL
   11698 /* WOL in the newer chipset interfaces (pchlan) */
   11699 static void
   11700 wm_enable_phy_wakeup(struct wm_softc *sc)
   11701 {
   11702 #if 0
   11703 	uint16_t preg;
   11704 
   11705 	/* Copy MAC RARs to PHY RARs */
   11706 
   11707 	/* Copy MAC MTA to PHY MTA */
   11708 
   11709 	/* Configure PHY Rx Control register */
   11710 
   11711 	/* Enable PHY wakeup in MAC register */
   11712 
   11713 	/* Configure and enable PHY wakeup in PHY registers */
   11714 
   11715 	/* Activate PHY wakeup */
   11716 
   11717 	/* XXX */
   11718 #endif
   11719 }
   11720 
   11721 /* Power down workaround on D3 */
   11722 static void
   11723 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11724 {
   11725 	uint32_t reg;
   11726 	int i;
   11727 
   11728 	for (i = 0; i < 2; i++) {
   11729 		/* Disable link */
   11730 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11731 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11732 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11733 
   11734 		/*
   11735 		 * Call gig speed drop workaround on Gig disable before
   11736 		 * accessing any PHY registers
   11737 		 */
   11738 		if (sc->sc_type == WM_T_ICH8)
   11739 			wm_gig_downshift_workaround_ich8lan(sc);
   11740 
   11741 		/* Write VR power-down enable */
   11742 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11743 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11744 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11745 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11746 
   11747 		/* Read it back and test */
   11748 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11749 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11750 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11751 			break;
   11752 
   11753 		/* Issue PHY reset and repeat at most one more time */
   11754 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11755 	}
   11756 }
   11757 
   11758 static void
   11759 wm_enable_wakeup(struct wm_softc *sc)
   11760 {
   11761 	uint32_t reg, pmreg;
   11762 	pcireg_t pmode;
   11763 
   11764 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11765 		&pmreg, NULL) == 0)
   11766 		return;
   11767 
   11768 	/* Advertise the wakeup capability */
   11769 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11770 	    | CTRL_SWDPIN(3));
   11771 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11772 
   11773 	/* ICH workaround */
   11774 	switch (sc->sc_type) {
   11775 	case WM_T_ICH8:
   11776 	case WM_T_ICH9:
   11777 	case WM_T_ICH10:
   11778 	case WM_T_PCH:
   11779 	case WM_T_PCH2:
   11780 	case WM_T_PCH_LPT:
   11781 	case WM_T_PCH_SPT:
   11782 		/* Disable gig during WOL */
   11783 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11784 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11785 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11786 		if (sc->sc_type == WM_T_PCH)
   11787 			wm_gmii_reset(sc);
   11788 
   11789 		/* Power down workaround */
   11790 		if (sc->sc_phytype == WMPHY_82577) {
   11791 			struct mii_softc *child;
   11792 
   11793 			/* Assume that the PHY is copper */
   11794 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11795 			if (child->mii_mpd_rev <= 2)
   11796 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11797 				    (768 << 5) | 25, 0x0444); /* magic num */
   11798 		}
   11799 		break;
   11800 	default:
   11801 		break;
   11802 	}
   11803 
   11804 	/* Keep the laser running on fiber adapters */
   11805 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11806 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11807 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11808 		reg |= CTRL_EXT_SWDPIN(3);
   11809 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11810 	}
   11811 
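	/* Wake on magic packet, at minimum */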
   11812 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11813 #if 0	/* for the multicast packet */
   11814 	reg |= WUFC_MC;
   11815 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11816 #endif
   11817 
   11818 	if (sc->sc_type == WM_T_PCH) {
   11819 		wm_enable_phy_wakeup(sc);
   11820 	} else {
   11821 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11822 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11823 	}
   11824 
   11825 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11826 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11827 		|| (sc->sc_type == WM_T_PCH2))
   11828 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11829 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11830 
   11831 	/* Request PME */
   11832 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11833 #if 0
   11834 	/* Disable WOL */
   11835 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11836 #else
   11837 	/* For WOL */
   11838 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11839 #endif
   11840 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11841 }
   11842 #endif /* WM_WOL */
   11843 
   11844 /* LPLU */
   11845 
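/*
 * LPLU (Low Power Link Up) lets the PHY negotiate a lower speed to save
 * power.  These helpers clear the D0 LPLU setting so that full speed is
 * used while the device is fully powered.
 */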
   11846 static void
   11847 wm_lplu_d0_disable(struct wm_softc *sc)
   11848 {
   11849 	uint32_t reg;
   11850 
   11851 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11852 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   11853 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11854 }
   11855 
   11856 static void
   11857 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   11858 {
   11859 	uint32_t reg;
   11860 
   11861 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   11862 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   11863 	reg |= HV_OEM_BITS_ANEGNOW;
   11864 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   11865 }
   11866 
   11867 /* EEE */
   11868 
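/*
 * EEE (Energy Efficient Ethernet, IEEE 802.3az): when enabled, advertise
 * EEE at 100Mb/s and 1Gb/s and let the link enter LPI (Low Power Idle)
 * between packets.
 */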
   11869 static void
   11870 wm_set_eee_i350(struct wm_softc *sc)
   11871 {
   11872 	uint32_t ipcnfg, eeer;
   11873 
   11874 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11875 	eeer = CSR_READ(sc, WMREG_EEER);
   11876 
   11877 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11878 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11879 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11880 		    | EEER_LPI_FC);
   11881 	} else {
   11882 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11883 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11884 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11885 		    | EEER_LPI_FC);
   11886 	}
   11887 
   11888 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11889 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11890 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11891 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11892 }
   11893 
   11894 /*
   11895  * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds live in the PHY drivers.
   11897  */
   11898 
   11899 /* Work-around for 82566 Kumeran PCS lock loss */
   11900 static void
   11901 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11902 {
   11903 #if 0
   11904 	int miistatus, active, i;
   11905 	int reg;
   11906 
   11907 	miistatus = sc->sc_mii.mii_media_status;
   11908 
   11909 	/* If the link is not up, do nothing */
   11910 	if ((miistatus & IFM_ACTIVE) == 0)
   11911 		return;
   11912 
   11913 	active = sc->sc_mii.mii_media_active;
   11914 
   11915 	/* Nothing to do if the link is other than 1Gbps */
   11916 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11917 		return;
   11918 
   11919 	for (i = 0; i < 10; i++) {
   11920 		/* read twice */
   11921 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11922 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11923 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11924 			goto out;	/* GOOD! */
   11925 
   11926 		/* Reset the PHY */
   11927 		wm_gmii_reset(sc);
   11928 		delay(5*1000);
   11929 	}
   11930 
   11931 	/* Disable GigE link negotiation */
   11932 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11933 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11934 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11935 
   11936 	/*
   11937 	 * Call gig speed drop workaround on Gig disable before accessing
   11938 	 * any PHY registers.
   11939 	 */
   11940 	wm_gig_downshift_workaround_ich8lan(sc);
   11941 
   11942 out:
   11943 	return;
   11944 #endif
   11945 }
   11946 
/* Workaround: WOL from S5 stops working (gig downshift, IGP3 PHYs only) */
   11948 static void
   11949 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11950 {
   11951 	uint16_t kmrn_reg;
   11952 
   11953 	/* Only for igp3 */
   11954 	if (sc->sc_phytype == WMPHY_IGP_3) {
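		/*
		 * Pulse the Kumeran near-end loopback bit; setting and then
		 * clearing KUMCTRLSTA_DIAG_NELPBK apparently unsticks the
		 * gig downshift logic.
		 */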
   11955 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11956 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11957 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11958 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11959 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11960 	}
   11961 }
   11962 
   11963 /*
 * Workarounds for the PCH PHYs
   11965  * XXX should be moved to new PHY driver?
   11966  */
   11967 static void
   11968 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   11969 {
   11970 
   11971 	KASSERT(sc->sc_type == WM_T_PCH);
   11972 
   11973 	if (sc->sc_phytype == WMPHY_82577)
   11974 		wm_set_mdio_slow_mode_hv(sc);
   11975 
   11976 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   11977 
   11978 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   11979 
   11980 	/* 82578 */
   11981 	if (sc->sc_phytype == WMPHY_82578) {
   11982 		/* PCH rev. < 3 */
   11983 		if (sc->sc_rev < 3) {
			/* (1 << 6) == (2 << 5), i.e. PHY page 2 */
   11985 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   11986 			    0x66c0);
   11987 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   11988 			    0xffff);
   11989 		}
   11990 
   11991 		/* XXX phy rev. < 2 */
   11992 	}
   11993 
   11994 	/* Select page 0 */
   11995 
   11996 	/* XXX acquire semaphore */
   11997 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   11998 	/* XXX release semaphore */
   11999 
   12000 	/*
   12001 	 * Configure the K1 Si workaround during phy reset assuming there is
   12002 	 * link so that it disables K1 if link is in 1Gbps.
   12003 	 */
   12004 	wm_k1_gig_workaround_hv(sc, 1);
   12005 }
   12006 
   12007 static void
   12008 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12009 {
   12010 
   12011 	KASSERT(sc->sc_type == WM_T_PCH2);
   12012 
   12013 	wm_set_mdio_slow_mode_hv(sc);
   12014 }
   12015 
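/*
 * K1 is a power-saving state of the MAC/PHY interconnect (Kumeran).  It
 * must not be used while the link is running at 1Gb/s, so it is enabled
 * or disabled here depending on the (assumed) link state.
 */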
   12016 static void
   12017 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12018 {
   12019 	int k1_enable = sc->sc_nvm_k1_enabled;
   12020 
   12021 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12022 		device_xname(sc->sc_dev), __func__));
   12023 
   12024 	/* XXX acquire semaphore */
   12025 
   12026 	if (link) {
   12027 		k1_enable = 0;
   12028 
   12029 		/* Link stall fix for link up */
   12030 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12031 	} else {
   12032 		/* Link stall fix for link down */
   12033 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12034 	}
   12035 
   12036 	wm_configure_k1_ich8lan(sc, k1_enable);
   12037 
   12038 	/* XXX release semaphore */
   12039 }
   12040 
   12041 static void
   12042 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12043 {
   12044 	uint32_t reg;
   12045 
   12046 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12047 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12048 	    reg | HV_KMRN_MDIO_SLOW);
   12049 }
   12050 
   12051 static void
   12052 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12053 {
   12054 	uint32_t ctrl, ctrl_ext, tmp;
   12055 	uint16_t kmrn_reg;
   12056 
   12057 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12058 
   12059 	if (k1_enable)
   12060 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12061 	else
   12062 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12063 
   12064 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12065 
   12066 	delay(20);
   12067 
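	/*
	 * Briefly force the MAC speed (CTRL_FRCSPD with CTRL_EXT_SPD_BYPS)
	 * so that the new K1 setting takes effect, then restore the
	 * original CTRL/CTRL_EXT values.
	 */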
   12068 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12069 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12070 
   12071 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12072 	tmp |= CTRL_FRCSPD;
   12073 
   12074 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12075 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12076 	CSR_WRITE_FLUSH(sc);
   12077 	delay(20);
   12078 
   12079 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12080 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12081 	CSR_WRITE_FLUSH(sc);
   12082 	delay(20);
   12083 }
   12084 
/* Special case - the 82575 needs a manual post-reset init sequence ... */
   12086 static void
   12087 wm_reset_init_script_82575(struct wm_softc *sc)
   12088 {
	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM.  The setup is the same as the one in the FreeBSD driver
	 * for the i82575.
	 */
   12093 
   12094 	/* SerDes configuration via SERDESCTRL */
   12095 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12096 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12097 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12098 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12099 
   12100 	/* CCM configuration via CCMCTL register */
   12101 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12102 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12103 
   12104 	/* PCIe lanes configuration */
   12105 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12106 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12107 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12108 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12109 
   12110 	/* PCIe PLL Configuration */
   12111 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12112 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12113 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12114 }
   12115 
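/*
 * Restore the external/shared MDIO bits of MDICNFG from the per-port
 * NVM word, presumably because a device reset reloads the register's
 * hardware defaults.
 */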
   12116 static void
   12117 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12118 {
   12119 	uint32_t reg;
   12120 	uint16_t nvmword;
   12121 	int rv;
   12122 
   12123 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12124 		return;
   12125 
   12126 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12127 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12128 	if (rv != 0) {
   12129 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12130 		    __func__);
   12131 		return;
   12132 	}
   12133 
   12134 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12135 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12136 		reg |= MDICNFG_DEST;
   12137 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12138 		reg |= MDICNFG_COM_MDIO;
   12139 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12140 }
   12141 
   12142 /*
   12143  * I210 Errata 25 and I211 Errata 10
   12144  * Slow System Clock.
   12145  */
   12146 static void
   12147 wm_pll_workaround_i210(struct wm_softc *sc)
   12148 {
   12149 	uint32_t mdicnfg, wuc;
   12150 	uint32_t reg;
   12151 	pcireg_t pcireg;
   12152 	uint32_t pmreg;
   12153 	uint16_t nvmword, tmp_nvmword;
   12154 	int phyval;
   12155 	bool wa_done = false;
   12156 	int i;
   12157 
   12158 	/* Save WUC and MDICNFG registers */
   12159 	wuc = CSR_READ(sc, WMREG_WUC);
   12160 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12161 
   12162 	reg = mdicnfg & ~MDICNFG_DEST;
   12163 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12164 
   12165 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12166 		nvmword = INVM_DEFAULT_AL;
   12167 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12168 
   12169 	/* Get Power Management cap offset */
   12170 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12171 		&pmreg, NULL) == 0)
   12172 		return;
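
	/*
	 * If the PHY PLL reads back as unconfigured, reset the PHY with the
	 * iNVM autoload word temporarily overridden by the PLL workaround
	 * value, bounce the function through D3hot, restore the original
	 * word and retry.
	 */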
   12173 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12174 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12175 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12176 
   12177 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12178 			break; /* OK */
   12179 		}
   12180 
   12181 		wa_done = true;
   12182 		/* Directly reset the internal PHY */
   12183 		reg = CSR_READ(sc, WMREG_CTRL);
   12184 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12185 
   12186 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12187 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12188 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12189 
   12190 		CSR_WRITE(sc, WMREG_WUC, 0);
   12191 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12192 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12193 
   12194 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12195 		    pmreg + PCI_PMCSR);
   12196 		pcireg |= PCI_PMCSR_STATE_D3;
   12197 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12198 		    pmreg + PCI_PMCSR, pcireg);
   12199 		delay(1000);
   12200 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12201 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12202 		    pmreg + PCI_PMCSR, pcireg);
   12203 
   12204 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12205 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12206 
   12207 		/* Restore WUC register */
   12208 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12209 	}
   12210 
   12211 	/* Restore MDICNFG setting */
   12212 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12213 	if (wa_done)
   12214 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12215 }
   12216