      1 /*	$NetBSD: if_wm.c,v 1.420 2016/10/19 08:55:23 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- Tx multiqueue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
     79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.420 2016/10/19 08:55:23 msaitoh Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (/*CONSTCOND*/0)
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
    167  * The maximum number of interrupts this device driver can use.
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
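
/*
 * Illustrative note: both ring sizes are powers of two, so the
 * "& mask" in WM_NEXTTX()/WM_NEXTTXS() is a cheap modulo.  E.g. with
 * WM_NTXDESC_82544 (4096) descriptors, WM_NEXTTX(txq, 4095) is
 * (4095 + 1) & 4095 == 0, wrapping back to the start of the ring.
 */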
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for normal
    204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    205  * packet.  We allocate 256 receive descriptors, each with a 2k
    206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
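
/*
 * Illustrative arithmetic for the sizing comment above, assuming a
 * 9000 byte jumbo frame and 2k (MCLBYTES) clusters: one such packet
 * spans howmany(9000, 2048) == 5 descriptors, and 256 / 5 == 51,
 * i.e. roughly the 50 jumbo packets quoted above.
 */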
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    220 
    221 /*
    222  * Software state for transmit jobs.
    223  */
    224 struct wm_txsoft {
    225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    227 	int txs_firstdesc;		/* first descriptor in packet */
    228 	int txs_lastdesc;		/* last descriptor in packet */
    229 	int txs_ndesc;			/* # of descriptors used */
    230 };
    231 
    232 /*
    233  * Software state for receive buffers.  Each descriptor gets a
    234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    235  * more than one buffer, we chain them together.
    236  */
    237 struct wm_rxsoft {
    238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    240 };
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
    244 static const uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
    250 
    251 static const uint32_t wm_82580_rxpbs_table[] = {
    252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    253 };
    254 
    255 struct wm_softc;
    256 
    257 #ifdef WM_EVENT_COUNTERS
    258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    260 	struct evcnt qname##_ev_##evname;
    261 
    262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    263 	do {								\
    264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    266 		    "%s%02d%s", #qname, (qnum), #evname);		\
    267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    268 		    (evtype), NULL, (xname),				\
    269 		    (q)->qname##_##evname##_evcnt_name);		\
    270 	} while (/*CONSTCOND*/0)
    271 
    272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    274 
    275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    277 #endif /* WM_EVENT_COUNTERS */
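
/*
 * Expansion example (illustrative): WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the "##" is not pasted inside the string literal, so the array is
 * sized by the placeholder string itself), and WM_Q_EVCNT_ATTACH()
 * with qnum 0 then formats the counter name as "txq00txdw" before
 * handing it to evcnt_attach_dynamic().
 */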
    278 
    279 struct wm_txqueue {
    280 	kmutex_t *txq_lock;		/* lock for tx operations */
    281 
    282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    283 
    284 	/* Software state for the transmit descriptors. */
    285 	int txq_num;			/* must be a power of two */
    286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    287 
    288 	/* TX control data structures. */
    289 	int txq_ndesc;			/* must be a power of two */
    290 	size_t txq_descsize;		/* a tx descriptor size */
    291 	txdescs_t *txq_descs_u;
    292 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    294 	int txq_desc_rseg;		/* real number of control segment */
    295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    296 #define	txq_descs	txq_descs_u->sctxu_txdescs
    297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    298 
    299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    300 
    301 	int txq_free;			/* number of free Tx descriptors */
    302 	int txq_next;			/* next ready Tx descriptor */
    303 
    304 	int txq_sfree;			/* number of free Tx jobs */
    305 	int txq_snext;			/* next free Tx job */
    306 	int txq_sdirty;			/* dirty Tx jobs */
    307 
    308 	/* These 4 variables are used only on the 82547. */
    309 	int txq_fifo_size;		/* Tx FIFO size */
    310 	int txq_fifo_head;		/* current head of FIFO */
    311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    313 
    314 	/*
    315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    316 	 * CPUs.  This queue mediates between them without blocking.
    317 	 */
    318 	pcq_t *txq_interq;
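
	/*
	 * Minimal usage sketch (illustrative; pcq_put()/pcq_get() are
	 * the real pcq(9) API from <sys/pcq.h>, but the surrounding
	 * lines are not verbatim driver code):
	 *
	 *	if (!pcq_put(txq->txq_interq, m))
	 *		m_freem(m);	(queue full: drop the packet)
	 *	...
	 *	while ((m = pcq_get(txq->txq_interq)) != NULL)
	 *		(load m into Tx descriptors)
	 */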
    319 
    320 	/*
    321 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    322 	 * to manage the Tx H/W queue's busy flag.
    323 	 */
    324 	int txq_flags;			/* flags for H/W queue, see below */
    325 #define	WM_TXQ_NO_SPACE	0x1
    326 
    327 #ifdef WM_EVENT_COUNTERS
    328 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    329 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    330 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    331 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    332 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    333 						/* XXX not used? */
    334 
    335 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    336 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    337 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    338 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    339 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    340 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    341 
    342 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    343 
    344 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    345 
    346 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    347 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    348 #endif /* WM_EVENT_COUNTERS */
    349 };
    350 
    351 struct wm_rxqueue {
    352 	kmutex_t *rxq_lock;		/* lock for rx operations */
    353 
    354 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    355 
    356 	/* Software state for the receive descriptors. */
    357 	wiseman_rxdesc_t *rxq_descs;
    358 
    359 	/* RX control data structures. */
    360 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    361 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    362 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    363 	int rxq_desc_rseg;		/* real number of control segment */
    364 	size_t rxq_desc_size;		/* control data size */
    365 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    366 
    367 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    368 
    369 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    370 	int rxq_discard;
    371 	int rxq_len;
    372 	struct mbuf *rxq_head;
    373 	struct mbuf *rxq_tail;
    374 	struct mbuf **rxq_tailp;
    375 
    376 #ifdef WM_EVENT_COUNTERS
    377 	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */
    378 
    379 	WM_Q_EVCNT_DEFINE(rxq, rxipsum)	/* IP checksums checked in-bound */
    380 	WM_Q_EVCNT_DEFINE(rxq, rxtusum)	/* TCP/UDP cksums checked in-bound */
    381 #endif
    382 };
    383 
    384 struct wm_queue {
    385 	int wmq_id;			/* index of transmit and receive queues */
    386 	int wmq_intr_idx;		/* index of MSI-X tables */
    387 
    388 	struct wm_txqueue wmq_txq;
    389 	struct wm_rxqueue wmq_rxq;
    390 };
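
/*
 * Interrupt layout implied by the declarations above and below: each
 * wm_queue records its MSI-X vector in wmq_intr_idx, and the link
 * interrupt uses a separate vector (sc_link_intr_idx in wm_softc),
 * which is why WM_MAX_NINTR is WM_MAX_NQUEUEINTR + 1.
 */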
    391 
    392 /*
    393  * Software state per device.
    394  */
    395 struct wm_softc {
    396 	device_t sc_dev;		/* generic device information */
    397 	bus_space_tag_t sc_st;		/* bus space tag */
    398 	bus_space_handle_t sc_sh;	/* bus space handle */
    399 	bus_size_t sc_ss;		/* bus space size */
    400 	bus_space_tag_t sc_iot;		/* I/O space tag */
    401 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    402 	bus_size_t sc_ios;		/* I/O space size */
    403 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    404 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    405 	bus_size_t sc_flashs;		/* flash registers space size */
    406 	off_t sc_flashreg_offset;	/*
    407 					 * offset to flash registers from
    408 					 * start of BAR
    409 					 */
    410 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    411 
    412 	struct ethercom sc_ethercom;	/* ethernet common data */
    413 	struct mii_data sc_mii;		/* MII/media information */
    414 
    415 	pci_chipset_tag_t sc_pc;
    416 	pcitag_t sc_pcitag;
    417 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    418 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    419 
    420 	uint16_t sc_pcidevid;		/* PCI device ID */
    421 	wm_chip_type sc_type;		/* MAC type */
    422 	int sc_rev;			/* MAC revision */
    423 	wm_phy_type sc_phytype;		/* PHY type */
    424 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    425 #define	WM_MEDIATYPE_UNKNOWN		0x00
    426 #define	WM_MEDIATYPE_FIBER		0x01
    427 #define	WM_MEDIATYPE_COPPER		0x02
    428 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    429 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    430 	int sc_flags;			/* flags; see below */
    431 	int sc_if_flags;		/* last if_flags */
    432 	int sc_flowflags;		/* 802.3x flow control flags */
    433 	int sc_align_tweak;
    434 
    435 	void *sc_ihs[WM_MAX_NINTR];	/*
    436 					 * interrupt cookie.
    437 					 * legacy and msi use sc_ihs[0].
    438 					 */
    439 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    440 	int sc_nintrs;			/* number of interrupts */
    441 
    442 	int sc_link_intr_idx;		/* index of MSI-X tables */
    443 
    444 	callout_t sc_tick_ch;		/* tick callout */
    445 	bool sc_stopping;
    446 
    447 	int sc_nvm_ver_major;
    448 	int sc_nvm_ver_minor;
    449 	int sc_nvm_ver_build;
    450 	int sc_nvm_addrbits;		/* NVM address bits */
    451 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    452 	int sc_ich8_flash_base;
    453 	int sc_ich8_flash_bank_size;
    454 	int sc_nvm_k1_enabled;
    455 
    456 	int sc_nqueues;
    457 	struct wm_queue *sc_queue;
    458 
    459 	int sc_affinity_offset;
    460 
    461 #ifdef WM_EVENT_COUNTERS
    462 	/* Event counters. */
    463 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    464 
    465 	/* WM_T_82542_2_1 only */
    466 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    467 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    468 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    469 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    470 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    471 #endif /* WM_EVENT_COUNTERS */
    472 
    473 	/* This variable is used only on the 82547. */
    474 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    475 
    476 	uint32_t sc_ctrl;		/* prototype CTRL register */
    477 #if 0
    478 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    479 #endif
    480 	uint32_t sc_icr;		/* prototype interrupt bits */
    481 	uint32_t sc_itr;		/* prototype intr throttling reg */
    482 	uint32_t sc_tctl;		/* prototype TCTL register */
    483 	uint32_t sc_rctl;		/* prototype RCTL register */
    484 	uint32_t sc_txcw;		/* prototype TXCW register */
    485 	uint32_t sc_tipg;		/* prototype TIPG register */
    486 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    487 	uint32_t sc_pba;		/* prototype PBA register */
    488 
    489 	int sc_tbi_linkup;		/* TBI link status */
    490 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    491 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    492 
    493 	int sc_mchash_type;		/* multicast filter offset */
    494 
    495 	krndsource_t rnd_source;	/* random source */
    496 
    497 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    498 
    499 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    500 };
    501 
    502 #define WM_CORE_LOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (/*CONSTCOND*/0)
    503 #define WM_CORE_UNLOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (/*CONSTCOND*/0)
    504 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    505 
    506 #ifdef WM_MPSAFE
    507 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    508 #else
    509 #define CALLOUT_FLAGS	0
    510 #endif
    511 
    512 #define	WM_RXCHAIN_RESET(rxq)						\
    513 do {									\
    514 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    515 	*(rxq)->rxq_tailp = NULL;					\
    516 	(rxq)->rxq_len = 0;						\
    517 } while (/*CONSTCOND*/0)
    518 
    519 #define	WM_RXCHAIN_LINK(rxq, m)						\
    520 do {									\
    521 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    522 	(rxq)->rxq_tailp = &(m)->m_next;				\
    523 } while (/*CONSTCOND*/0)
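
/*
 * Illustrative note on the tail-pointer idiom above: rxq_tailp always
 * points at the slot that should receive the next mbuf pointer
 * (&rxq_head while the chain is empty, &lastmbuf->m_next afterwards),
 * so WM_RXCHAIN_LINK() appends in O(1) without walking the chain.
 */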
    524 
    525 #ifdef WM_EVENT_COUNTERS
    526 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    527 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    528 
    529 #define WM_Q_EVCNT_INCR(qname, evname)			\
    530 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    531 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    532 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    533 #else /* !WM_EVENT_COUNTERS */
    534 #define	WM_EVCNT_INCR(ev)	/* nothing */
    535 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    536 
    537 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    538 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    539 #endif /* !WM_EVENT_COUNTERS */
    540 
    541 #define	CSR_READ(sc, reg)						\
    542 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    543 #define	CSR_WRITE(sc, reg, val)						\
    544 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    545 #define	CSR_WRITE_FLUSH(sc)						\
    546 	(void) CSR_READ((sc), WMREG_STATUS)
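
/*
 * Typical flush pattern (an illustrative sketch, not a verbatim call
 * site from this file): reading WMREG_STATUS pushes posted writes out
 * to the device before a timed delay, e.g.
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */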
    547 
    548 #define ICH8_FLASH_READ32(sc, reg)					\
    549 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    550 	    (reg) + sc->sc_flashreg_offset)
    551 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    552 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    553 	    (reg) + sc->sc_flashreg_offset, (data))
    554 
    555 #define ICH8_FLASH_READ16(sc, reg)					\
    556 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    557 	    (reg) + sc->sc_flashreg_offset)
    558 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    559 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    560 	    (reg) + sc->sc_flashreg_offset, (data))
    561 
    562 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    563 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    564 
    565 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    566 #define	WM_CDTXADDR_HI(txq, x)						\
    567 	(sizeof(bus_addr_t) == 8 ?					\
    568 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    569 
    570 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    571 #define	WM_CDRXADDR_HI(rxq, x)						\
    572 	(sizeof(bus_addr_t) == 8 ?					\
    573 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
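
/*
 * Illustrative note: descriptor ring base addresses are programmed as
 * two 32-bit halves.  For a ring at bus address 0x123456000,
 * WM_CDTXADDR_LO() yields 0x23456000 and WM_CDTXADDR_HI() yields 0x1
 * (the high half is always 0 when bus_addr_t is 32-bit).
 */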
    574 
    575 /*
    576  * Register read/write functions.
    577  * Other than CSR_{READ|WRITE}().
    578  */
    579 #if 0
    580 static inline uint32_t wm_io_read(struct wm_softc *, int);
    581 #endif
    582 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    583 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    584 	uint32_t, uint32_t);
    585 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    586 
    587 /*
    588  * Descriptor sync/init functions.
    589  */
    590 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    591 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    592 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    593 
    594 /*
    595  * Device driver interface functions and commonly used functions.
    596  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    597  */
    598 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    599 static int	wm_match(device_t, cfdata_t, void *);
    600 static void	wm_attach(device_t, device_t, void *);
    601 static int	wm_detach(device_t, int);
    602 static bool	wm_suspend(device_t, const pmf_qual_t *);
    603 static bool	wm_resume(device_t, const pmf_qual_t *);
    604 static void	wm_watchdog(struct ifnet *);
    605 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    606 static void	wm_tick(void *);
    607 static int	wm_ifflags_cb(struct ethercom *);
    608 static int	wm_ioctl(struct ifnet *, u_long, void *);
    609 /* MAC address related */
    610 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    611 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    612 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    613 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    614 static void	wm_set_filter(struct wm_softc *);
    615 /* Reset and init related */
    616 static void	wm_set_vlan(struct wm_softc *);
    617 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    618 static void	wm_get_auto_rd_done(struct wm_softc *);
    619 static void	wm_lan_init_done(struct wm_softc *);
    620 static void	wm_get_cfg_done(struct wm_softc *);
    621 static void	wm_initialize_hardware_bits(struct wm_softc *);
    622 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    623 static void	wm_reset(struct wm_softc *);
    624 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    625 static void	wm_rxdrain(struct wm_rxqueue *);
    626 static void	wm_rss_getkey(uint8_t *);
    627 static void	wm_init_rss(struct wm_softc *);
    628 static void	wm_adjust_qnum(struct wm_softc *, int);
    629 static int	wm_setup_legacy(struct wm_softc *);
    630 static int	wm_setup_msix(struct wm_softc *);
    631 static int	wm_init(struct ifnet *);
    632 static int	wm_init_locked(struct ifnet *);
    633 static void	wm_stop(struct ifnet *, int);
    634 static void	wm_stop_locked(struct ifnet *, int);
    635 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    636 static void	wm_82547_txfifo_stall(void *);
    637 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    638 /* DMA related */
    639 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    640 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    641 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    642 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    643     struct wm_txqueue *);
    644 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    645 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    646 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    647     struct wm_rxqueue *);
    648 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    649 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    650 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    651 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    652 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    653 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    654 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    655     struct wm_txqueue *);
    656 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    657     struct wm_rxqueue *);
    658 static int	wm_alloc_txrx_queues(struct wm_softc *);
    659 static void	wm_free_txrx_queues(struct wm_softc *);
    660 static int	wm_init_txrx_queues(struct wm_softc *);
    661 /* Start */
    662 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    663     uint32_t *, uint8_t *);
    664 static void	wm_start(struct ifnet *);
    665 static void	wm_start_locked(struct ifnet *);
    666 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    667     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    668 static void	wm_nq_start(struct ifnet *);
    669 static void	wm_nq_start_locked(struct ifnet *);
    670 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    671 static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
    672 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    673 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    674 /* Interrupt */
    675 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    676 static void	wm_rxeof(struct wm_rxqueue *);
    677 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    678 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    679 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    680 static void	wm_linkintr(struct wm_softc *, uint32_t);
    681 static int	wm_intr_legacy(void *);
    682 static int	wm_txrxintr_msix(void *);
    683 static int	wm_linkintr_msix(void *);
    684 
    685 /*
    686  * Media related.
    687  * GMII, SGMII, TBI, SERDES and SFP.
    688  */
    689 /* Common */
    690 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    691 /* GMII related */
    692 static void	wm_gmii_reset(struct wm_softc *);
    693 static int	wm_get_phy_id_82575(struct wm_softc *);
    694 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    695 static int	wm_gmii_mediachange(struct ifnet *);
    696 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    697 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    698 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    699 static int	wm_gmii_i82543_readreg(device_t, int, int);
    700 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    701 static int	wm_gmii_i82544_readreg(device_t, int, int);
    702 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    703 static int	wm_gmii_i80003_readreg(device_t, int, int);
    704 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    705 static int	wm_gmii_bm_readreg(device_t, int, int);
    706 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    707 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    708 static int	wm_gmii_hv_readreg(device_t, int, int);
    709 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    710 static int	wm_gmii_82580_readreg(device_t, int, int);
    711 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    712 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    713 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    714 static void	wm_gmii_statchg(struct ifnet *);
    715 static int	wm_kmrn_readreg(struct wm_softc *, int);
    716 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    717 /* SGMII */
    718 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    719 static int	wm_sgmii_readreg(device_t, int, int);
    720 static void	wm_sgmii_writereg(device_t, int, int, int);
    721 /* TBI related */
    722 static void	wm_tbi_mediainit(struct wm_softc *);
    723 static int	wm_tbi_mediachange(struct ifnet *);
    724 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    725 static int	wm_check_for_link(struct wm_softc *);
    726 static void	wm_tbi_tick(struct wm_softc *);
    727 /* SERDES related */
    728 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    729 static int	wm_serdes_mediachange(struct ifnet *);
    730 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    731 static void	wm_serdes_tick(struct wm_softc *);
    732 /* SFP related */
    733 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    734 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    735 
    736 /*
    737  * NVM related.
    738  * Microwire, SPI (w/wo EERD) and Flash.
    739  */
    740 /* Misc functions */
    741 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    742 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    743 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    744 /* Microwire */
    745 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    746 /* SPI */
    747 static int	wm_nvm_ready_spi(struct wm_softc *);
    748 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    749 /* Using with EERD */
    750 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    751 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    752 /* Flash */
    753 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    754     unsigned int *);
    755 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    756 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    757 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    758 	uint32_t *);
    759 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    760 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    761 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    762 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    763 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    764 /* iNVM */
    765 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    766 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    767 /* Lock, detecting NVM type, validate checksum and read */
    768 static int	wm_nvm_acquire(struct wm_softc *);
    769 static void	wm_nvm_release(struct wm_softc *);
    770 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    771 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    772 static int	wm_nvm_validate_checksum(struct wm_softc *);
    773 static void	wm_nvm_version_invm(struct wm_softc *);
    774 static void	wm_nvm_version(struct wm_softc *);
    775 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    776 
    777 /*
    778  * Hardware semaphores.
    779  * Very complex...
    780  */
    781 static int	wm_get_swsm_semaphore(struct wm_softc *);
    782 static void	wm_put_swsm_semaphore(struct wm_softc *);
    783 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    784 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    785 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    786 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    787 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    788 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    789 
    790 /*
    791  * Management mode and power management related subroutines.
    792  * BMC, AMT, suspend/resume and EEE.
    793  */
    794 #ifdef WM_WOL
    795 static int	wm_check_mng_mode(struct wm_softc *);
    796 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    797 static int	wm_check_mng_mode_82574(struct wm_softc *);
    798 static int	wm_check_mng_mode_generic(struct wm_softc *);
    799 #endif
    800 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    801 static bool	wm_phy_resetisblocked(struct wm_softc *);
    802 static void	wm_get_hw_control(struct wm_softc *);
    803 static void	wm_release_hw_control(struct wm_softc *);
    804 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    805 static void	wm_smbustopci(struct wm_softc *);
    806 static void	wm_init_manageability(struct wm_softc *);
    807 static void	wm_release_manageability(struct wm_softc *);
    808 static void	wm_get_wakeup(struct wm_softc *);
    809 #ifdef WM_WOL
    810 static void	wm_enable_phy_wakeup(struct wm_softc *);
    811 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    812 static void	wm_enable_wakeup(struct wm_softc *);
    813 #endif
    814 /* LPLU (Low Power Link Up) */
    815 static void	wm_lplu_d0_disable(struct wm_softc *);
    816 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    817 /* EEE */
    818 static void	wm_set_eee_i350(struct wm_softc *);
    819 
    820 /*
    821  * Workarounds (mainly PHY related).
    822  * Basically, PHY's workarounds are in the PHY drivers.
    823  */
    824 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    825 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    826 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    827 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    828 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    829 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    830 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    831 static void	wm_reset_init_script_82575(struct wm_softc *);
    832 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    833 static void	wm_pll_workaround_i210(struct wm_softc *);
    834 
    835 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    836     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    837 
    838 /*
    839  * Devices supported by this driver.
    840  */
    841 static const struct wm_product {
    842 	pci_vendor_id_t		wmp_vendor;
    843 	pci_product_id_t	wmp_product;
    844 	const char		*wmp_name;
    845 	wm_chip_type		wmp_type;
    846 	uint32_t		wmp_flags;
    847 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    848 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    849 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    850 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    851 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    852 } wm_products[] = {
    853 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    854 	  "Intel i82542 1000BASE-X Ethernet",
    855 	  WM_T_82542_2_1,	WMP_F_FIBER },
    856 
    857 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    858 	  "Intel i82543GC 1000BASE-X Ethernet",
    859 	  WM_T_82543,		WMP_F_FIBER },
    860 
    861 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    862 	  "Intel i82543GC 1000BASE-T Ethernet",
    863 	  WM_T_82543,		WMP_F_COPPER },
    864 
    865 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    866 	  "Intel i82544EI 1000BASE-T Ethernet",
    867 	  WM_T_82544,		WMP_F_COPPER },
    868 
    869 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    870 	  "Intel i82544EI 1000BASE-X Ethernet",
    871 	  WM_T_82544,		WMP_F_FIBER },
    872 
    873 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    874 	  "Intel i82544GC 1000BASE-T Ethernet",
    875 	  WM_T_82544,		WMP_F_COPPER },
    876 
    877 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    878 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    879 	  WM_T_82544,		WMP_F_COPPER },
    880 
    881 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    882 	  "Intel i82540EM 1000BASE-T Ethernet",
    883 	  WM_T_82540,		WMP_F_COPPER },
    884 
    885 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    886 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    887 	  WM_T_82540,		WMP_F_COPPER },
    888 
    889 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    890 	  "Intel i82540EP 1000BASE-T Ethernet",
    891 	  WM_T_82540,		WMP_F_COPPER },
    892 
    893 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    894 	  "Intel i82540EP 1000BASE-T Ethernet",
    895 	  WM_T_82540,		WMP_F_COPPER },
    896 
    897 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    898 	  "Intel i82540EP 1000BASE-T Ethernet",
    899 	  WM_T_82540,		WMP_F_COPPER },
    900 
    901 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    902 	  "Intel i82545EM 1000BASE-T Ethernet",
    903 	  WM_T_82545,		WMP_F_COPPER },
    904 
    905 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    906 	  "Intel i82545GM 1000BASE-T Ethernet",
    907 	  WM_T_82545_3,		WMP_F_COPPER },
    908 
    909 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    910 	  "Intel i82545GM 1000BASE-X Ethernet",
    911 	  WM_T_82545_3,		WMP_F_FIBER },
    912 
    913 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    914 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    915 	  WM_T_82545_3,		WMP_F_SERDES },
    916 
    917 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    918 	  "Intel i82546EB 1000BASE-T Ethernet",
    919 	  WM_T_82546,		WMP_F_COPPER },
    920 
    921 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    922 	  "Intel i82546EB 1000BASE-T Ethernet",
    923 	  WM_T_82546,		WMP_F_COPPER },
    924 
    925 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    926 	  "Intel i82545EM 1000BASE-X Ethernet",
    927 	  WM_T_82545,		WMP_F_FIBER },
    928 
    929 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    930 	  "Intel i82546EB 1000BASE-X Ethernet",
    931 	  WM_T_82546,		WMP_F_FIBER },
    932 
    933 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    934 	  "Intel i82546GB 1000BASE-T Ethernet",
    935 	  WM_T_82546_3,		WMP_F_COPPER },
    936 
    937 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    938 	  "Intel i82546GB 1000BASE-X Ethernet",
    939 	  WM_T_82546_3,		WMP_F_FIBER },
    940 
    941 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    942 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    943 	  WM_T_82546_3,		WMP_F_SERDES },
    944 
    945 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    946 	  "i82546GB quad-port Gigabit Ethernet",
    947 	  WM_T_82546_3,		WMP_F_COPPER },
    948 
    949 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    950 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    951 	  WM_T_82546_3,		WMP_F_COPPER },
    952 
    953 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    954 	  "Intel PRO/1000MT (82546GB)",
    955 	  WM_T_82546_3,		WMP_F_COPPER },
    956 
    957 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    958 	  "Intel i82541EI 1000BASE-T Ethernet",
    959 	  WM_T_82541,		WMP_F_COPPER },
    960 
    961 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    962 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    963 	  WM_T_82541,		WMP_F_COPPER },
    964 
    965 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    966 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    967 	  WM_T_82541,		WMP_F_COPPER },
    968 
    969 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    970 	  "Intel i82541ER 1000BASE-T Ethernet",
    971 	  WM_T_82541_2,		WMP_F_COPPER },
    972 
    973 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    974 	  "Intel i82541GI 1000BASE-T Ethernet",
    975 	  WM_T_82541_2,		WMP_F_COPPER },
    976 
    977 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    978 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    979 	  WM_T_82541_2,		WMP_F_COPPER },
    980 
    981 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    982 	  "Intel i82541PI 1000BASE-T Ethernet",
    983 	  WM_T_82541_2,		WMP_F_COPPER },
    984 
    985 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    986 	  "Intel i82547EI 1000BASE-T Ethernet",
    987 	  WM_T_82547,		WMP_F_COPPER },
    988 
    989 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    990 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    991 	  WM_T_82547,		WMP_F_COPPER },
    992 
    993 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    994 	  "Intel i82547GI 1000BASE-T Ethernet",
    995 	  WM_T_82547_2,		WMP_F_COPPER },
    996 
    997 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    998 	  "Intel PRO/1000 PT (82571EB)",
    999 	  WM_T_82571,		WMP_F_COPPER },
   1000 
   1001 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1002 	  "Intel PRO/1000 PF (82571EB)",
   1003 	  WM_T_82571,		WMP_F_FIBER },
   1004 
   1005 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1006 	  "Intel PRO/1000 PB (82571EB)",
   1007 	  WM_T_82571,		WMP_F_SERDES },
   1008 
   1009 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1010 	  "Intel PRO/1000 QT (82571EB)",
   1011 	  WM_T_82571,		WMP_F_COPPER },
   1012 
   1013 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1014 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1015 	  WM_T_82571,		WMP_F_COPPER, },
   1016 
   1017 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1018 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1019 	  WM_T_82571,		WMP_F_COPPER, },
   1020 
   1021 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1022 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1023 	  WM_T_82571,		WMP_F_SERDES, },
   1024 
   1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1026 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1027 	  WM_T_82571,		WMP_F_SERDES, },
   1028 
   1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1030 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1031 	  WM_T_82571,		WMP_F_FIBER, },
   1032 
   1033 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1034 	  "Intel i82572EI 1000baseT Ethernet",
   1035 	  WM_T_82572,		WMP_F_COPPER },
   1036 
   1037 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1038 	  "Intel i82572EI 1000baseX Ethernet",
   1039 	  WM_T_82572,		WMP_F_FIBER },
   1040 
   1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1042 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1043 	  WM_T_82572,		WMP_F_SERDES },
   1044 
   1045 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1046 	  "Intel i82572EI 1000baseT Ethernet",
   1047 	  WM_T_82572,		WMP_F_COPPER },
   1048 
   1049 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1050 	  "Intel i82573E",
   1051 	  WM_T_82573,		WMP_F_COPPER },
   1052 
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1054 	  "Intel i82573E IAMT",
   1055 	  WM_T_82573,		WMP_F_COPPER },
   1056 
   1057 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1058 	  "Intel i82573L Gigabit Ethernet",
   1059 	  WM_T_82573,		WMP_F_COPPER },
   1060 
   1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1062 	  "Intel i82574L",
   1063 	  WM_T_82574,		WMP_F_COPPER },
   1064 
   1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1066 	  "Intel i82574L",
   1067 	  WM_T_82574,		WMP_F_COPPER },
   1068 
   1069 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1070 	  "Intel i82583V",
   1071 	  WM_T_82583,		WMP_F_COPPER },
   1072 
   1073 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1074 	  "i80003 dual 1000baseT Ethernet",
   1075 	  WM_T_80003,		WMP_F_COPPER },
   1076 
   1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1078 	  "i80003 dual 1000baseX Ethernet",
   1079 	  WM_T_80003,		WMP_F_COPPER },
   1080 
   1081 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1082 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1083 	  WM_T_80003,		WMP_F_SERDES },
   1084 
   1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1086 	  "Intel i80003 1000baseT Ethernet",
   1087 	  WM_T_80003,		WMP_F_COPPER },
   1088 
   1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1090 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1091 	  WM_T_80003,		WMP_F_SERDES },
   1092 
   1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1094 	  "Intel i82801H (M_AMT) LAN Controller",
   1095 	  WM_T_ICH8,		WMP_F_COPPER },
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1097 	  "Intel i82801H (AMT) LAN Controller",
   1098 	  WM_T_ICH8,		WMP_F_COPPER },
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1100 	  "Intel i82801H LAN Controller",
   1101 	  WM_T_ICH8,		WMP_F_COPPER },
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1103 	  "Intel i82801H (IFE) LAN Controller",
   1104 	  WM_T_ICH8,		WMP_F_COPPER },
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1106 	  "Intel i82801H (M) LAN Controller",
   1107 	  WM_T_ICH8,		WMP_F_COPPER },
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1109 	  "Intel i82801H IFE (GT) LAN Controller",
   1110 	  WM_T_ICH8,		WMP_F_COPPER },
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1112 	  "Intel i82801H IFE (G) LAN Controller",
   1113 	  WM_T_ICH8,		WMP_F_COPPER },
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1115 	  "82801I (AMT) LAN Controller",
   1116 	  WM_T_ICH9,		WMP_F_COPPER },
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1118 	  "82801I LAN Controller",
   1119 	  WM_T_ICH9,		WMP_F_COPPER },
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1121 	  "82801I (G) LAN Controller",
   1122 	  WM_T_ICH9,		WMP_F_COPPER },
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1124 	  "82801I (GT) LAN Controller",
   1125 	  WM_T_ICH9,		WMP_F_COPPER },
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1127 	  "82801I (C) LAN Controller",
   1128 	  WM_T_ICH9,		WMP_F_COPPER },
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1130 	  "82801I mobile LAN Controller",
   1131 	  WM_T_ICH9,		WMP_F_COPPER },
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1133 	  "82801I mobile (V) LAN Controller",
   1134 	  WM_T_ICH9,		WMP_F_COPPER },
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1136 	  "82801I mobile (AMT) LAN Controller",
   1137 	  WM_T_ICH9,		WMP_F_COPPER },
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1139 	  "82567LM-4 LAN Controller",
   1140 	  WM_T_ICH9,		WMP_F_COPPER },
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1142 	  "82567V-3 LAN Controller",
   1143 	  WM_T_ICH9,		WMP_F_COPPER },
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1145 	  "82567LM-2 LAN Controller",
   1146 	  WM_T_ICH10,		WMP_F_COPPER },
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1148 	  "82567LF-2 LAN Controller",
   1149 	  WM_T_ICH10,		WMP_F_COPPER },
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1151 	  "82567LM-3 LAN Controller",
   1152 	  WM_T_ICH10,		WMP_F_COPPER },
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1154 	  "82567LF-3 LAN Controller",
   1155 	  WM_T_ICH10,		WMP_F_COPPER },
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1157 	  "82567V-2 LAN Controller",
   1158 	  WM_T_ICH10,		WMP_F_COPPER },
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1160 	  "82567V-3? LAN Controller",
   1161 	  WM_T_ICH10,		WMP_F_COPPER },
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1163 	  "HANKSVILLE LAN Controller",
   1164 	  WM_T_ICH10,		WMP_F_COPPER },
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1166 	  "PCH LAN (82577LM) Controller",
   1167 	  WM_T_PCH,		WMP_F_COPPER },
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1169 	  "PCH LAN (82577LC) Controller",
   1170 	  WM_T_PCH,		WMP_F_COPPER },
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1172 	  "PCH LAN (82578DM) Controller",
   1173 	  WM_T_PCH,		WMP_F_COPPER },
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1175 	  "PCH LAN (82578DC) Controller",
   1176 	  WM_T_PCH,		WMP_F_COPPER },
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1178 	  "PCH2 LAN (82579LM) Controller",
   1179 	  WM_T_PCH2,		WMP_F_COPPER },
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1181 	  "PCH2 LAN (82579V) Controller",
   1182 	  WM_T_PCH2,		WMP_F_COPPER },
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1184 	  "82575EB dual-1000baseT Ethernet",
   1185 	  WM_T_82575,		WMP_F_COPPER },
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1187 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1188 	  WM_T_82575,		WMP_F_SERDES },
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1190 	  "82575GB quad-1000baseT Ethernet",
   1191 	  WM_T_82575,		WMP_F_COPPER },
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1193 	  "82575GB quad-1000baseT Ethernet (PM)",
   1194 	  WM_T_82575,		WMP_F_COPPER },
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1196 	  "82576 1000BaseT Ethernet",
   1197 	  WM_T_82576,		WMP_F_COPPER },
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1199 	  "82576 1000BaseX Ethernet",
   1200 	  WM_T_82576,		WMP_F_FIBER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1203 	  "82576 gigabit Ethernet (SERDES)",
   1204 	  WM_T_82576,		WMP_F_SERDES },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1207 	  "82576 quad-1000BaseT Ethernet",
   1208 	  WM_T_82576,		WMP_F_COPPER },
   1209 
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1211 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1212 	  WM_T_82576,		WMP_F_COPPER },
   1213 
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1215 	  "82576 gigabit Ethernet",
   1216 	  WM_T_82576,		WMP_F_COPPER },
   1217 
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1219 	  "82576 gigabit Ethernet (SERDES)",
   1220 	  WM_T_82576,		WMP_F_SERDES },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1222 	  "82576 quad-gigabit Ethernet (SERDES)",
   1223 	  WM_T_82576,		WMP_F_SERDES },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1226 	  "82580 1000BaseT Ethernet",
   1227 	  WM_T_82580,		WMP_F_COPPER },
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1229 	  "82580 1000BaseX Ethernet",
   1230 	  WM_T_82580,		WMP_F_FIBER },
   1231 
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1233 	  "82580 1000BaseT Ethernet (SERDES)",
   1234 	  WM_T_82580,		WMP_F_SERDES },
   1235 
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1237 	  "82580 gigabit Ethernet (SGMII)",
   1238 	  WM_T_82580,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1240 	  "82580 dual-1000BaseT Ethernet",
   1241 	  WM_T_82580,		WMP_F_COPPER },
   1242 
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1244 	  "82580 quad-1000BaseX Ethernet",
   1245 	  WM_T_82580,		WMP_F_FIBER },
   1246 
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1248 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1249 	  WM_T_82580,		WMP_F_COPPER },
   1250 
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1252 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1253 	  WM_T_82580,		WMP_F_SERDES },
   1254 
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1256 	  "DH89XXCC 1000BASE-KX Ethernet",
   1257 	  WM_T_82580,		WMP_F_SERDES },
   1258 
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1260 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1261 	  WM_T_82580,		WMP_F_SERDES },
   1262 
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1264 	  "I350 Gigabit Network Connection",
   1265 	  WM_T_I350,		WMP_F_COPPER },
   1266 
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1268 	  "I350 Gigabit Fiber Network Connection",
   1269 	  WM_T_I350,		WMP_F_FIBER },
   1270 
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1272 	  "I350 Gigabit Backplane Connection",
   1273 	  WM_T_I350,		WMP_F_SERDES },
   1274 
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1276 	  "I350 Quad Port Gigabit Ethernet",
   1277 	  WM_T_I350,		WMP_F_SERDES },
   1278 
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1280 	  "I350 Gigabit Connection",
   1281 	  WM_T_I350,		WMP_F_COPPER },
   1282 
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1284 	  "I354 Gigabit Ethernet (KX)",
   1285 	  WM_T_I354,		WMP_F_SERDES },
   1286 
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1288 	  "I354 Gigabit Ethernet (SGMII)",
   1289 	  WM_T_I354,		WMP_F_COPPER },
   1290 
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1292 	  "I354 Gigabit Ethernet (2.5G)",
   1293 	  WM_T_I354,		WMP_F_COPPER },
   1294 
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1296 	  "I210-T1 Ethernet Server Adapter",
   1297 	  WM_T_I210,		WMP_F_COPPER },
   1298 
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1300 	  "I210 Ethernet (Copper OEM)",
   1301 	  WM_T_I210,		WMP_F_COPPER },
   1302 
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1304 	  "I210 Ethernet (Copper IT)",
   1305 	  WM_T_I210,		WMP_F_COPPER },
   1306 
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1308 	  "I210 Ethernet (FLASH less)",
   1309 	  WM_T_I210,		WMP_F_COPPER },
   1310 
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1312 	  "I210 Gigabit Ethernet (Fiber)",
   1313 	  WM_T_I210,		WMP_F_FIBER },
   1314 
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1316 	  "I210 Gigabit Ethernet (SERDES)",
   1317 	  WM_T_I210,		WMP_F_SERDES },
   1318 
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1320 	  "I210 Gigabit Ethernet (FLASH less)",
   1321 	  WM_T_I210,		WMP_F_SERDES },
   1322 
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1324 	  "I210 Gigabit Ethernet (SGMII)",
   1325 	  WM_T_I210,		WMP_F_COPPER },
   1326 
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1328 	  "I211 Ethernet (COPPER)",
   1329 	  WM_T_I211,		WMP_F_COPPER },
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1331 	  "I217 V Ethernet Connection",
   1332 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1334 	  "I217 LM Ethernet Connection",
   1335 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1337 	  "I218 V Ethernet Connection",
   1338 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1340 	  "I218 V Ethernet Connection",
   1341 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1343 	  "I218 V Ethernet Connection",
   1344 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1346 	  "I218 LM Ethernet Connection",
   1347 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1349 	  "I218 LM Ethernet Connection",
   1350 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1352 	  "I218 LM Ethernet Connection",
   1353 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1354 #if 0
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1356 	  "I219 V Ethernet Connection",
   1357 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1359 	  "I219 V Ethernet Connection",
   1360 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1362 	  "I219 LM Ethernet Connection",
   1363 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1365 	  "I219 LM Ethernet Connection",
   1366 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1367 #endif
   1368 	{ 0,			0,
   1369 	  NULL,
   1370 	  0,			0 },
   1371 };
   1372 
   1373 /*
   1374  * Register read/write functions.
   1375  * Other than CSR_{READ|WRITE}().
   1376  */
   1377 
   1378 #if 0 /* Not currently used */
   1379 static inline uint32_t
   1380 wm_io_read(struct wm_softc *sc, int reg)
   1381 {
   1382 
   1383 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1384 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1385 }
   1386 #endif
   1387 
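         /*
          * Indirect I/O-space register access: the target register
          * offset is written at I/O offset 0, then the value is read
          * or written at I/O offset 4 (as wm_io_write() below does).
          */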
   1388 static inline void
   1389 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1390 {
   1391 
   1392 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1393 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1394 }
   1395 
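         /*
          * Write a byte to an 8-bit controller register reached
          * through a 32-bit control register on 82575-family chips:
          * the data and the register offset are packed into a single
          * write, then the READY bit is polled (in 5us steps) until
          * the chip has latched the value.
          */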
   1396 static inline void
   1397 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1398     uint32_t data)
   1399 {
   1400 	uint32_t regval;
   1401 	int i;
   1402 
   1403 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1404 
   1405 	CSR_WRITE(sc, reg, regval);
   1406 
   1407 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1408 		delay(5);
   1409 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1410 			break;
   1411 	}
   1412 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1413 		aprint_error("%s: WARNING:"
   1414 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1415 		    device_xname(sc->sc_dev), reg);
   1416 	}
   1417 }
   1418 
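         /*
          * Store a DMA address in a descriptor address field as two
          * little-endian 32-bit words; the high word is zero when
          * bus_addr_t is only 32 bits wide.
          */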
   1419 static inline void
   1420 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1421 {
   1422 	wa->wa_low = htole32(v & 0xffffffffU);
   1423 	if (sizeof(bus_addr_t) == 8)
   1424 		wa->wa_high = htole32((uint64_t) v >> 32);
   1425 	else
   1426 		wa->wa_high = 0;
   1427 }
   1428 
   1429 /*
   1430  * Descriptor sync/init functions.
   1431  */
   1432 static inline void
   1433 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1434 {
   1435 	struct wm_softc *sc = txq->txq_sc;
   1436 
   1437 	/* If it will wrap around, sync to the end of the ring. */
   1438 	if ((start + num) > WM_NTXDESC(txq)) {
   1439 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1440 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1441 		    (WM_NTXDESC(txq) - start), ops);
   1442 		num -= (WM_NTXDESC(txq) - start);
   1443 		start = 0;
   1444 	}
   1445 
   1446 	/* Now sync whatever is left. */
   1447 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1448 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1449 }
   1450 
   1451 static inline void
   1452 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1453 {
   1454 	struct wm_softc *sc = rxq->rxq_sc;
   1455 
   1456 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1457 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1458 }
   1459 
   1460 static inline void
   1461 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1462 {
   1463 	struct wm_softc *sc = rxq->rxq_sc;
   1464 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1465 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1466 	struct mbuf *m = rxs->rxs_mbuf;
   1467 
   1468 	/*
   1469 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1470 	 * so that the payload after the Ethernet header is aligned
   1471 	 * to a 4-byte boundary.
    1472 	 *
   1473 	 * XXX BRAINDAMAGE ALERT!
   1474 	 * The stupid chip uses the same size for every buffer, which
   1475 	 * is set in the Receive Control register.  We are using the 2K
   1476 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1477 	 * reason, we can't "scoot" packets longer than the standard
   1478 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1479 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1480 	 * the upper layer copy the headers.
   1481 	 */
   1482 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1483 
   1484 	wm_set_dma_addr(&rxd->wrx_addr,
   1485 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1486 	rxd->wrx_len = 0;
   1487 	rxd->wrx_cksum = 0;
   1488 	rxd->wrx_status = 0;
   1489 	rxd->wrx_errors = 0;
   1490 	rxd->wrx_special = 0;
   1491 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1492 
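         	/* Hand the descriptor to the chip by advancing the RX ring tail */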
   1493 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1494 }
   1495 
   1496 /*
   1497  * Device driver interface functions and commonly used functions.
   1498  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1499  */
   1500 
    1501 /* Look up a device in the table of supported devices */
   1502 static const struct wm_product *
   1503 wm_lookup(const struct pci_attach_args *pa)
   1504 {
   1505 	const struct wm_product *wmp;
   1506 
   1507 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1508 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1509 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1510 			return wmp;
   1511 	}
   1512 	return NULL;
   1513 }
   1514 
   1515 /* The match function (ca_match) */
   1516 static int
   1517 wm_match(device_t parent, cfdata_t cf, void *aux)
   1518 {
   1519 	struct pci_attach_args *pa = aux;
   1520 
   1521 	if (wm_lookup(pa) != NULL)
   1522 		return 1;
   1523 
   1524 	return 0;
   1525 }
   1526 
   1527 /* The attach function (ca_attach) */
   1528 static void
   1529 wm_attach(device_t parent, device_t self, void *aux)
   1530 {
   1531 	struct wm_softc *sc = device_private(self);
   1532 	struct pci_attach_args *pa = aux;
   1533 	prop_dictionary_t dict;
   1534 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1535 	pci_chipset_tag_t pc = pa->pa_pc;
   1536 	int counts[PCI_INTR_TYPE_SIZE];
   1537 	pci_intr_type_t max_type;
   1538 	const char *eetype, *xname;
   1539 	bus_space_tag_t memt;
   1540 	bus_space_handle_t memh;
   1541 	bus_size_t memsize;
   1542 	int memh_valid;
   1543 	int i, error;
   1544 	const struct wm_product *wmp;
   1545 	prop_data_t ea;
   1546 	prop_number_t pn;
   1547 	uint8_t enaddr[ETHER_ADDR_LEN];
   1548 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1549 	pcireg_t preg, memtype;
   1550 	uint16_t eeprom_data, apme_mask;
   1551 	bool force_clear_smbi;
   1552 	uint32_t link_mode;
   1553 	uint32_t reg;
   1554 
   1555 	sc->sc_dev = self;
   1556 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1557 	sc->sc_stopping = false;
   1558 
   1559 	wmp = wm_lookup(pa);
   1560 #ifdef DIAGNOSTIC
   1561 	if (wmp == NULL) {
   1562 		printf("\n");
   1563 		panic("wm_attach: impossible");
   1564 	}
   1565 #endif
   1566 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1567 
   1568 	sc->sc_pc = pa->pa_pc;
   1569 	sc->sc_pcitag = pa->pa_tag;
   1570 
   1571 	if (pci_dma64_available(pa))
   1572 		sc->sc_dmat = pa->pa_dmat64;
   1573 	else
   1574 		sc->sc_dmat = pa->pa_dmat;
   1575 
   1576 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1577 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1578 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1579 
   1580 	sc->sc_type = wmp->wmp_type;
   1581 	if (sc->sc_type < WM_T_82543) {
   1582 		if (sc->sc_rev < 2) {
   1583 			aprint_error_dev(sc->sc_dev,
   1584 			    "i82542 must be at least rev. 2\n");
   1585 			return;
   1586 		}
   1587 		if (sc->sc_rev < 3)
   1588 			sc->sc_type = WM_T_82542_2_0;
   1589 	}
   1590 
   1591 	/*
   1592 	 * Disable MSI for Errata:
   1593 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1594 	 *
   1595 	 *  82544: Errata 25
   1596 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1597 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1598 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1599 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1600 	 *
   1601 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1602 	 *
   1603 	 *  82571 & 82572: Errata 63
   1604 	 */
   1605 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1606 	    || (sc->sc_type == WM_T_82572))
   1607 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1608 
   1609 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1610 	    || (sc->sc_type == WM_T_82580)
   1611 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1612 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1613 		sc->sc_flags |= WM_F_NEWQUEUE;
   1614 
   1615 	/* Set device properties (mactype) */
   1616 	dict = device_properties(sc->sc_dev);
   1617 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1618 
   1619 	/*
    1620 	 * Map the device.  All devices support memory-mapped access,
   1621 	 * and it is really required for normal operation.
   1622 	 */
   1623 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1624 	switch (memtype) {
   1625 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1626 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1627 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1628 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1629 		break;
   1630 	default:
   1631 		memh_valid = 0;
   1632 		break;
   1633 	}
   1634 
   1635 	if (memh_valid) {
   1636 		sc->sc_st = memt;
   1637 		sc->sc_sh = memh;
   1638 		sc->sc_ss = memsize;
   1639 	} else {
   1640 		aprint_error_dev(sc->sc_dev,
   1641 		    "unable to map device registers\n");
   1642 		return;
   1643 	}
   1644 
   1645 	/*
   1646 	 * In addition, i82544 and later support I/O mapped indirect
   1647 	 * register access.  It is not desirable (nor supported in
   1648 	 * this driver) to use it for normal operation, though it is
   1649 	 * required to work around bugs in some chip versions.
   1650 	 */
   1651 	if (sc->sc_type >= WM_T_82544) {
   1652 		/* First we have to find the I/O BAR. */
   1653 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1654 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1655 			if (memtype == PCI_MAPREG_TYPE_IO)
   1656 				break;
   1657 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1658 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1659 				i += 4;	/* skip high bits, too */
   1660 		}
   1661 		if (i < PCI_MAPREG_END) {
   1662 			/*
    1663 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1664 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1665 			 * That's no problem, because those newer chips
    1666 			 * don't have this bug.
    1667 			 *
    1668 			 * The i8254x apparently doesn't respond when the
    1669 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1670 			 * been configured.
   1671 			 */
   1672 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1673 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1674 				aprint_error_dev(sc->sc_dev,
   1675 				    "WARNING: I/O BAR at zero.\n");
   1676 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1677 					0, &sc->sc_iot, &sc->sc_ioh,
   1678 					NULL, &sc->sc_ios) == 0) {
   1679 				sc->sc_flags |= WM_F_IOH_VALID;
   1680 			} else {
   1681 				aprint_error_dev(sc->sc_dev,
   1682 				    "WARNING: unable to map I/O space\n");
   1683 			}
   1684 		}
   1685 
   1686 	}
   1687 
   1688 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1689 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1690 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1691 	if (sc->sc_type < WM_T_82542_2_1)
   1692 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1693 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1694 
   1695 	/* power up chip */
   1696 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1697 	    NULL)) && error != EOPNOTSUPP) {
   1698 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1699 		return;
   1700 	}
   1701 
   1702 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1703 
   1704 	/* Allocation settings */
   1705 	max_type = PCI_INTR_TYPE_MSIX;
   1706 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1707 	counts[PCI_INTR_TYPE_MSI] = 1;
   1708 	counts[PCI_INTR_TYPE_INTX] = 1;
   1709 
   1710 alloc_retry:
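         	/*
         	 * Allocate interrupts, degrading from MSI-X through MSI
         	 * to INTx: if setup of the allocated type fails below,
         	 * the vectors are released, max_type is lowered and we
         	 * retry from here.
         	 */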
   1711 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1712 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1713 		return;
   1714 	}
   1715 
   1716 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1717 		error = wm_setup_msix(sc);
   1718 		if (error) {
   1719 			pci_intr_release(pc, sc->sc_intrs,
   1720 			    counts[PCI_INTR_TYPE_MSIX]);
   1721 
   1722 			/* Setup for MSI: Disable MSI-X */
   1723 			max_type = PCI_INTR_TYPE_MSI;
   1724 			counts[PCI_INTR_TYPE_MSI] = 1;
   1725 			counts[PCI_INTR_TYPE_INTX] = 1;
   1726 			goto alloc_retry;
   1727 		}
    1728 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1729 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1730 		error = wm_setup_legacy(sc);
   1731 		if (error) {
   1732 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1733 			    counts[PCI_INTR_TYPE_MSI]);
   1734 
   1735 			/* The next try is for INTx: Disable MSI */
   1736 			max_type = PCI_INTR_TYPE_INTX;
   1737 			counts[PCI_INTR_TYPE_INTX] = 1;
   1738 			goto alloc_retry;
   1739 		}
   1740 	} else {
   1741 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1742 		error = wm_setup_legacy(sc);
   1743 		if (error) {
   1744 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1745 			    counts[PCI_INTR_TYPE_INTX]);
   1746 			return;
   1747 		}
   1748 	}
   1749 
   1750 	/*
   1751 	 * Check the function ID (unit number of the chip).
   1752 	 */
   1753 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1754 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1755 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1756 	    || (sc->sc_type == WM_T_82580)
   1757 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1758 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1759 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1760 	else
   1761 		sc->sc_funcid = 0;
   1762 
   1763 	/*
   1764 	 * Determine a few things about the bus we're connected to.
   1765 	 */
   1766 	if (sc->sc_type < WM_T_82543) {
   1767 		/* We don't really know the bus characteristics here. */
   1768 		sc->sc_bus_speed = 33;
   1769 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1770 		/*
   1771 		 * CSA (Communication Streaming Architecture) is about as fast
    1772 		 * as a 32-bit 66MHz PCI bus.
   1773 		 */
   1774 		sc->sc_flags |= WM_F_CSA;
   1775 		sc->sc_bus_speed = 66;
   1776 		aprint_verbose_dev(sc->sc_dev,
   1777 		    "Communication Streaming Architecture\n");
   1778 		if (sc->sc_type == WM_T_82547) {
   1779 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1780 			callout_setfunc(&sc->sc_txfifo_ch,
   1781 					wm_82547_txfifo_stall, sc);
   1782 			aprint_verbose_dev(sc->sc_dev,
   1783 			    "using 82547 Tx FIFO stall work-around\n");
   1784 		}
   1785 	} else if (sc->sc_type >= WM_T_82571) {
   1786 		sc->sc_flags |= WM_F_PCIE;
   1787 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1788 		    && (sc->sc_type != WM_T_ICH10)
   1789 		    && (sc->sc_type != WM_T_PCH)
   1790 		    && (sc->sc_type != WM_T_PCH2)
   1791 		    && (sc->sc_type != WM_T_PCH_LPT)
   1792 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1793 			/* ICH* and PCH* have no PCIe capability registers */
   1794 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1795 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1796 				NULL) == 0)
   1797 				aprint_error_dev(sc->sc_dev,
   1798 				    "unable to find PCIe capability\n");
   1799 		}
   1800 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1801 	} else {
   1802 		reg = CSR_READ(sc, WMREG_STATUS);
   1803 		if (reg & STATUS_BUS64)
   1804 			sc->sc_flags |= WM_F_BUS64;
   1805 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1806 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1807 
   1808 			sc->sc_flags |= WM_F_PCIX;
   1809 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1810 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1811 				aprint_error_dev(sc->sc_dev,
   1812 				    "unable to find PCIX capability\n");
   1813 			else if (sc->sc_type != WM_T_82545_3 &&
   1814 				 sc->sc_type != WM_T_82546_3) {
   1815 				/*
   1816 				 * Work around a problem caused by the BIOS
   1817 				 * setting the max memory read byte count
   1818 				 * incorrectly.
   1819 				 */
   1820 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1821 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1822 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1823 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1824 
   1825 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1826 				    PCIX_CMD_BYTECNT_SHIFT;
   1827 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1828 				    PCIX_STATUS_MAXB_SHIFT;
   1829 				if (bytecnt > maxb) {
   1830 					aprint_verbose_dev(sc->sc_dev,
   1831 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1832 					    512 << bytecnt, 512 << maxb);
   1833 					pcix_cmd = (pcix_cmd &
   1834 					    ~PCIX_CMD_BYTECNT_MASK) |
   1835 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1836 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1837 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1838 					    pcix_cmd);
   1839 				}
   1840 			}
   1841 		}
   1842 		/*
   1843 		 * The quad port adapter is special; it has a PCIX-PCIX
   1844 		 * bridge on the board, and can run the secondary bus at
   1845 		 * a higher speed.
   1846 		 */
   1847 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1848 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1849 								      : 66;
   1850 		} else if (sc->sc_flags & WM_F_PCIX) {
   1851 			switch (reg & STATUS_PCIXSPD_MASK) {
   1852 			case STATUS_PCIXSPD_50_66:
   1853 				sc->sc_bus_speed = 66;
   1854 				break;
   1855 			case STATUS_PCIXSPD_66_100:
   1856 				sc->sc_bus_speed = 100;
   1857 				break;
   1858 			case STATUS_PCIXSPD_100_133:
   1859 				sc->sc_bus_speed = 133;
   1860 				break;
   1861 			default:
   1862 				aprint_error_dev(sc->sc_dev,
   1863 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1864 				    reg & STATUS_PCIXSPD_MASK);
   1865 				sc->sc_bus_speed = 66;
   1866 				break;
   1867 			}
   1868 		} else
   1869 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1870 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1871 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1872 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1873 	}
   1874 
   1875 	/* clear interesting stat counters */
   1876 	CSR_READ(sc, WMREG_COLC);
   1877 	CSR_READ(sc, WMREG_RXERRC);
   1878 
    1879 	/* Move PHY control from SMBus to PCIe */
   1880 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1881 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1882 		wm_smbustopci(sc);
   1883 
   1884 	/* Reset the chip to a known state. */
   1885 	wm_reset(sc);
   1886 
   1887 	/* Get some information about the EEPROM. */
   1888 	switch (sc->sc_type) {
   1889 	case WM_T_82542_2_0:
   1890 	case WM_T_82542_2_1:
   1891 	case WM_T_82543:
   1892 	case WM_T_82544:
   1893 		/* Microwire */
   1894 		sc->sc_nvm_wordsize = 64;
   1895 		sc->sc_nvm_addrbits = 6;
   1896 		break;
   1897 	case WM_T_82540:
   1898 	case WM_T_82545:
   1899 	case WM_T_82545_3:
   1900 	case WM_T_82546:
   1901 	case WM_T_82546_3:
   1902 		/* Microwire */
   1903 		reg = CSR_READ(sc, WMREG_EECD);
   1904 		if (reg & EECD_EE_SIZE) {
   1905 			sc->sc_nvm_wordsize = 256;
   1906 			sc->sc_nvm_addrbits = 8;
   1907 		} else {
   1908 			sc->sc_nvm_wordsize = 64;
   1909 			sc->sc_nvm_addrbits = 6;
   1910 		}
   1911 		sc->sc_flags |= WM_F_LOCK_EECD;
   1912 		break;
   1913 	case WM_T_82541:
   1914 	case WM_T_82541_2:
   1915 	case WM_T_82547:
   1916 	case WM_T_82547_2:
   1917 		sc->sc_flags |= WM_F_LOCK_EECD;
   1918 		reg = CSR_READ(sc, WMREG_EECD);
   1919 		if (reg & EECD_EE_TYPE) {
   1920 			/* SPI */
   1921 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1922 			wm_nvm_set_addrbits_size_eecd(sc);
   1923 		} else {
   1924 			/* Microwire */
   1925 			if ((reg & EECD_EE_ABITS) != 0) {
   1926 				sc->sc_nvm_wordsize = 256;
   1927 				sc->sc_nvm_addrbits = 8;
   1928 			} else {
   1929 				sc->sc_nvm_wordsize = 64;
   1930 				sc->sc_nvm_addrbits = 6;
   1931 			}
   1932 		}
   1933 		break;
   1934 	case WM_T_82571:
   1935 	case WM_T_82572:
   1936 		/* SPI */
   1937 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1938 		wm_nvm_set_addrbits_size_eecd(sc);
   1939 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1940 		break;
   1941 	case WM_T_82573:
   1942 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1943 		/* FALLTHROUGH */
   1944 	case WM_T_82574:
   1945 	case WM_T_82583:
   1946 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1947 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1948 			sc->sc_nvm_wordsize = 2048;
   1949 		} else {
   1950 			/* SPI */
   1951 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1952 			wm_nvm_set_addrbits_size_eecd(sc);
   1953 		}
   1954 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1955 		break;
   1956 	case WM_T_82575:
   1957 	case WM_T_82576:
   1958 	case WM_T_82580:
   1959 	case WM_T_I350:
   1960 	case WM_T_I354:
   1961 	case WM_T_80003:
   1962 		/* SPI */
   1963 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1964 		wm_nvm_set_addrbits_size_eecd(sc);
   1965 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1966 		    | WM_F_LOCK_SWSM;
   1967 		break;
   1968 	case WM_T_ICH8:
   1969 	case WM_T_ICH9:
   1970 	case WM_T_ICH10:
   1971 	case WM_T_PCH:
   1972 	case WM_T_PCH2:
   1973 	case WM_T_PCH_LPT:
   1974 		/* FLASH */
   1975 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1976 		sc->sc_nvm_wordsize = 2048;
   1977 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   1978 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1979 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1980 			aprint_error_dev(sc->sc_dev,
   1981 			    "can't map FLASH registers\n");
   1982 			goto out;
   1983 		}
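         		/*
         		 * GFPREG describes the NVM region in units of
         		 * ICH_FLASH_SECTOR_SIZE sectors: the low field is
         		 * the first sector, the field at bit 16 the last.
         		 * Turn that into a byte offset and a per-bank size
         		 * in 16-bit words (the region holds two banks).
         		 */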
   1984 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1985 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1986 		    ICH_FLASH_SECTOR_SIZE;
   1987 		sc->sc_ich8_flash_bank_size =
   1988 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1989 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   1990 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1991 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1992 		sc->sc_flashreg_offset = 0;
   1993 		break;
   1994 	case WM_T_PCH_SPT:
   1995 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   1996 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1997 		sc->sc_flasht = sc->sc_st;
   1998 		sc->sc_flashh = sc->sc_sh;
   1999 		sc->sc_ich8_flash_base = 0;
   2000 		sc->sc_nvm_wordsize =
   2001 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2002 			* NVM_SIZE_MULTIPLIER;
    2003 		/* That's the size in bytes; we want it in words */
   2004 		sc->sc_nvm_wordsize /= 2;
   2005 		/* assume 2 banks */
   2006 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2007 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2008 		break;
   2009 	case WM_T_I210:
   2010 	case WM_T_I211:
   2011 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2012 			wm_nvm_set_addrbits_size_eecd(sc);
   2013 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2014 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   2015 		} else {
   2016 			sc->sc_nvm_wordsize = INVM_SIZE;
   2017 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2018 			sc->sc_flags |= WM_F_LOCK_SWFW;
   2019 		}
   2020 		break;
   2021 	default:
   2022 		break;
   2023 	}
   2024 
   2025 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2026 	switch (sc->sc_type) {
   2027 	case WM_T_82571:
   2028 	case WM_T_82572:
   2029 		reg = CSR_READ(sc, WMREG_SWSM2);
   2030 		if ((reg & SWSM2_LOCK) == 0) {
   2031 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2032 			force_clear_smbi = true;
   2033 		} else
   2034 			force_clear_smbi = false;
   2035 		break;
   2036 	case WM_T_82573:
   2037 	case WM_T_82574:
   2038 	case WM_T_82583:
   2039 		force_clear_smbi = true;
   2040 		break;
   2041 	default:
   2042 		force_clear_smbi = false;
   2043 		break;
   2044 	}
   2045 	if (force_clear_smbi) {
   2046 		reg = CSR_READ(sc, WMREG_SWSM);
   2047 		if ((reg & SWSM_SMBI) != 0)
   2048 			aprint_error_dev(sc->sc_dev,
   2049 			    "Please update the Bootagent\n");
   2050 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2051 	}
   2052 
   2053 	/*
    2054 	 * Defer printing the EEPROM type until after verifying the checksum.
   2055 	 * This allows the EEPROM type to be printed correctly in the case
   2056 	 * that no EEPROM is attached.
   2057 	 */
   2058 	/*
   2059 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2060 	 * this for later, so we can fail future reads from the EEPROM.
   2061 	 */
   2062 	if (wm_nvm_validate_checksum(sc)) {
   2063 		/*
    2064 		 * Validate the checksum again, because some PCI-e parts
    2065 		 * fail the first check due to the link being in a sleep state.
   2066 		 */
   2067 		if (wm_nvm_validate_checksum(sc))
   2068 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2069 	}
   2070 
   2071 	/* Set device properties (macflags) */
   2072 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2073 
   2074 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2075 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2076 	else {
   2077 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2078 		    sc->sc_nvm_wordsize);
   2079 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2080 			aprint_verbose("iNVM");
   2081 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2082 			aprint_verbose("FLASH(HW)");
   2083 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2084 			aprint_verbose("FLASH");
   2085 		else {
   2086 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2087 				eetype = "SPI";
   2088 			else
   2089 				eetype = "MicroWire";
   2090 			aprint_verbose("(%d address bits) %s EEPROM",
   2091 			    sc->sc_nvm_addrbits, eetype);
   2092 		}
   2093 	}
   2094 	wm_nvm_version(sc);
   2095 	aprint_verbose("\n");
   2096 
   2097 	/* Check for I21[01] PLL workaround */
   2098 	if (sc->sc_type == WM_T_I210)
   2099 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2100 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2101 		/* NVM image release 3.25 has a workaround */
   2102 		if ((sc->sc_nvm_ver_major < 3)
   2103 		    || ((sc->sc_nvm_ver_major == 3)
   2104 			&& (sc->sc_nvm_ver_minor < 25))) {
   2105 			aprint_verbose_dev(sc->sc_dev,
   2106 			    "ROM image version %d.%d is older than 3.25\n",
   2107 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2108 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2109 		}
   2110 	}
   2111 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2112 		wm_pll_workaround_i210(sc);
   2113 
   2114 	wm_get_wakeup(sc);
   2115 	switch (sc->sc_type) {
   2116 	case WM_T_82571:
   2117 	case WM_T_82572:
   2118 	case WM_T_82573:
   2119 	case WM_T_82574:
   2120 	case WM_T_82583:
   2121 	case WM_T_80003:
   2122 	case WM_T_ICH8:
   2123 	case WM_T_ICH9:
   2124 	case WM_T_ICH10:
   2125 	case WM_T_PCH:
   2126 	case WM_T_PCH2:
   2127 	case WM_T_PCH_LPT:
   2128 	case WM_T_PCH_SPT:
   2129 		/* Non-AMT based hardware can now take control from firmware */
   2130 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2131 			wm_get_hw_control(sc);
   2132 		break;
   2133 	default:
   2134 		break;
   2135 	}
   2136 
   2137 	/*
   2138 	 * Read the Ethernet address from the EEPROM, if not first found
   2139 	 * in device properties.
   2140 	 */
   2141 	ea = prop_dictionary_get(dict, "mac-address");
   2142 	if (ea != NULL) {
   2143 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2144 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2145 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2146 	} else {
   2147 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2148 			aprint_error_dev(sc->sc_dev,
   2149 			    "unable to read Ethernet address\n");
   2150 			goto out;
   2151 		}
   2152 	}
   2153 
   2154 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2155 	    ether_sprintf(enaddr));
   2156 
   2157 	/*
   2158 	 * Read the config info from the EEPROM, and set up various
   2159 	 * bits in the control registers based on their contents.
   2160 	 */
   2161 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2162 	if (pn != NULL) {
   2163 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2164 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2165 	} else {
   2166 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2167 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2168 			goto out;
   2169 		}
   2170 	}
   2171 
   2172 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2173 	if (pn != NULL) {
   2174 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2175 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2176 	} else {
   2177 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2178 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2179 			goto out;
   2180 		}
   2181 	}
   2182 
   2183 	/* check for WM_F_WOL */
   2184 	switch (sc->sc_type) {
   2185 	case WM_T_82542_2_0:
   2186 	case WM_T_82542_2_1:
   2187 	case WM_T_82543:
   2188 		/* dummy? */
   2189 		eeprom_data = 0;
   2190 		apme_mask = NVM_CFG3_APME;
   2191 		break;
   2192 	case WM_T_82544:
   2193 		apme_mask = NVM_CFG2_82544_APM_EN;
   2194 		eeprom_data = cfg2;
   2195 		break;
   2196 	case WM_T_82546:
   2197 	case WM_T_82546_3:
   2198 	case WM_T_82571:
   2199 	case WM_T_82572:
   2200 	case WM_T_82573:
   2201 	case WM_T_82574:
   2202 	case WM_T_82583:
   2203 	case WM_T_80003:
   2204 	default:
   2205 		apme_mask = NVM_CFG3_APME;
   2206 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2207 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2208 		break;
   2209 	case WM_T_82575:
   2210 	case WM_T_82576:
   2211 	case WM_T_82580:
   2212 	case WM_T_I350:
   2213 	case WM_T_I354: /* XXX ok? */
   2214 	case WM_T_ICH8:
   2215 	case WM_T_ICH9:
   2216 	case WM_T_ICH10:
   2217 	case WM_T_PCH:
   2218 	case WM_T_PCH2:
   2219 	case WM_T_PCH_LPT:
   2220 	case WM_T_PCH_SPT:
   2221 		/* XXX The funcid should be checked on some devices */
   2222 		apme_mask = WUC_APME;
   2223 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2224 		break;
   2225 	}
   2226 
    2227 	/* Set WM_F_WOL now that eeprom_data and apme_mask are initialized */
   2228 	if ((eeprom_data & apme_mask) != 0)
   2229 		sc->sc_flags |= WM_F_WOL;
   2230 #ifdef WM_DEBUG
   2231 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2232 		printf("WOL\n");
   2233 #endif
   2234 
   2235 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2236 		/* Check NVM for autonegotiation */
   2237 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2238 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2239 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2240 		}
   2241 	}
   2242 
   2243 	/*
    2244 	 * XXX need special handling for some multiple-port cards
    2245 	 * to disable a particular port.
   2246 	 */
   2247 
   2248 	if (sc->sc_type >= WM_T_82544) {
   2249 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2250 		if (pn != NULL) {
   2251 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2252 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2253 		} else {
   2254 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2255 				aprint_error_dev(sc->sc_dev,
   2256 				    "unable to read SWDPIN\n");
   2257 				goto out;
   2258 			}
   2259 		}
   2260 	}
   2261 
   2262 	if (cfg1 & NVM_CFG1_ILOS)
   2263 		sc->sc_ctrl |= CTRL_ILOS;
   2264 
   2265 	/*
   2266 	 * XXX
    2267 	 * This code isn't correct because pins 2 and 3 are located
    2268 	 * at different positions on newer chips. Check all datasheets.
    2269 	 *
    2270 	 * Until this is resolved, only do it for chips up to the 82580.
   2271 	 */
   2272 	if (sc->sc_type <= WM_T_82580) {
   2273 		if (sc->sc_type >= WM_T_82544) {
   2274 			sc->sc_ctrl |=
   2275 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2276 			    CTRL_SWDPIO_SHIFT;
   2277 			sc->sc_ctrl |=
   2278 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2279 			    CTRL_SWDPINS_SHIFT;
   2280 		} else {
   2281 			sc->sc_ctrl |=
   2282 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2283 			    CTRL_SWDPIO_SHIFT;
   2284 		}
   2285 	}
   2286 
    2287 	/* XXX Is this also needed for chips other than the 82580? */
   2288 	if (sc->sc_type == WM_T_82580) {
   2289 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2290 		if (nvmword & __BIT(13))
   2291 			sc->sc_ctrl |= CTRL_ILOS;
   2292 	}
   2293 
   2294 #if 0
   2295 	if (sc->sc_type >= WM_T_82544) {
   2296 		if (cfg1 & NVM_CFG1_IPS0)
   2297 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2298 		if (cfg1 & NVM_CFG1_IPS1)
   2299 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2300 		sc->sc_ctrl_ext |=
   2301 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2302 		    CTRL_EXT_SWDPIO_SHIFT;
   2303 		sc->sc_ctrl_ext |=
   2304 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2305 		    CTRL_EXT_SWDPINS_SHIFT;
   2306 	} else {
   2307 		sc->sc_ctrl_ext |=
   2308 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2309 		    CTRL_EXT_SWDPIO_SHIFT;
   2310 	}
   2311 #endif
   2312 
   2313 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2314 #if 0
   2315 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2316 #endif
   2317 
   2318 	if (sc->sc_type == WM_T_PCH) {
   2319 		uint16_t val;
   2320 
   2321 		/* Save the NVM K1 bit setting */
   2322 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2323 
   2324 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2325 			sc->sc_nvm_k1_enabled = 1;
   2326 		else
   2327 			sc->sc_nvm_k1_enabled = 0;
   2328 	}
   2329 
   2330 	/*
    2331 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2332 	 * media structures accordingly.
   2333 	 */
   2334 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2335 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2336 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2337 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2338 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2339 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2340 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2341 	} else if (sc->sc_type < WM_T_82543 ||
   2342 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2343 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2344 			aprint_error_dev(sc->sc_dev,
   2345 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2346 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2347 		}
   2348 		wm_tbi_mediainit(sc);
   2349 	} else {
   2350 		switch (sc->sc_type) {
   2351 		case WM_T_82575:
   2352 		case WM_T_82576:
   2353 		case WM_T_82580:
   2354 		case WM_T_I350:
   2355 		case WM_T_I354:
   2356 		case WM_T_I210:
   2357 		case WM_T_I211:
   2358 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2359 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2360 			switch (link_mode) {
   2361 			case CTRL_EXT_LINK_MODE_1000KX:
   2362 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2363 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2364 				break;
   2365 			case CTRL_EXT_LINK_MODE_SGMII:
   2366 				if (wm_sgmii_uses_mdio(sc)) {
   2367 					aprint_verbose_dev(sc->sc_dev,
   2368 					    "SGMII(MDIO)\n");
   2369 					sc->sc_flags |= WM_F_SGMII;
   2370 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2371 					break;
   2372 				}
   2373 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2374 				/*FALLTHROUGH*/
   2375 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2376 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2377 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2378 					if (link_mode
   2379 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2380 						sc->sc_mediatype
   2381 						    = WM_MEDIATYPE_COPPER;
   2382 						sc->sc_flags |= WM_F_SGMII;
   2383 					} else {
   2384 						sc->sc_mediatype
   2385 						    = WM_MEDIATYPE_SERDES;
   2386 						aprint_verbose_dev(sc->sc_dev,
   2387 						    "SERDES\n");
   2388 					}
   2389 					break;
   2390 				}
   2391 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2392 					aprint_verbose_dev(sc->sc_dev,
   2393 					    "SERDES\n");
   2394 
   2395 				/* Change current link mode setting */
   2396 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2397 				switch (sc->sc_mediatype) {
   2398 				case WM_MEDIATYPE_COPPER:
   2399 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2400 					break;
   2401 				case WM_MEDIATYPE_SERDES:
   2402 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2403 					break;
   2404 				default:
   2405 					break;
   2406 				}
   2407 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2408 				break;
   2409 			case CTRL_EXT_LINK_MODE_GMII:
   2410 			default:
   2411 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2412 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2413 				break;
   2414 			}
   2415 
    2417 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2418 				reg |= CTRL_EXT_I2C_ENA;
    2419 			else
    2420 				reg &= ~CTRL_EXT_I2C_ENA;
   2421 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2422 
   2423 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2424 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2425 			else
   2426 				wm_tbi_mediainit(sc);
   2427 			break;
   2428 		default:
   2429 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2430 				aprint_error_dev(sc->sc_dev,
   2431 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2432 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2433 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2434 		}
   2435 	}
   2436 
   2437 	ifp = &sc->sc_ethercom.ec_if;
   2438 	xname = device_xname(sc->sc_dev);
   2439 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2440 	ifp->if_softc = sc;
   2441 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2442 	ifp->if_extflags = IFEF_START_MPSAFE;
   2443 	ifp->if_ioctl = wm_ioctl;
   2444 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2445 		ifp->if_start = wm_nq_start;
   2446 		if (sc->sc_nqueues > 1)
   2447 			ifp->if_transmit = wm_nq_transmit;
   2448 	} else
   2449 		ifp->if_start = wm_start;
   2450 	ifp->if_watchdog = wm_watchdog;
   2451 	ifp->if_init = wm_init;
   2452 	ifp->if_stop = wm_stop;
   2453 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2454 	IFQ_SET_READY(&ifp->if_snd);
   2455 
   2456 	/* Check for jumbo frame */
   2457 	switch (sc->sc_type) {
   2458 	case WM_T_82573:
   2459 		/* XXX limited to 9234 if ASPM is disabled */
   2460 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2461 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2462 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2463 		break;
   2464 	case WM_T_82571:
   2465 	case WM_T_82572:
   2466 	case WM_T_82574:
   2467 	case WM_T_82575:
   2468 	case WM_T_82576:
   2469 	case WM_T_82580:
   2470 	case WM_T_I350:
    2471 	case WM_T_I354: /* XXX ok? */
   2472 	case WM_T_I210:
   2473 	case WM_T_I211:
   2474 	case WM_T_80003:
   2475 	case WM_T_ICH9:
   2476 	case WM_T_ICH10:
   2477 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2478 	case WM_T_PCH_LPT:
   2479 	case WM_T_PCH_SPT:
   2480 		/* XXX limited to 9234 */
   2481 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2482 		break;
   2483 	case WM_T_PCH:
   2484 		/* XXX limited to 4096 */
   2485 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2486 		break;
   2487 	case WM_T_82542_2_0:
   2488 	case WM_T_82542_2_1:
   2489 	case WM_T_82583:
   2490 	case WM_T_ICH8:
   2491 		/* No support for jumbo frame */
   2492 		break;
   2493 	default:
   2494 		/* ETHER_MAX_LEN_JUMBO */
   2495 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2496 		break;
   2497 	}
   2498 
   2499 	/* If we're a i82543 or greater, we can support VLANs. */
   2500 	if (sc->sc_type >= WM_T_82543)
   2501 		sc->sc_ethercom.ec_capabilities |=
   2502 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2503 
   2504 	/*
    2505 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2506 	 * on i82543 and later.
   2507 	 */
   2508 	if (sc->sc_type >= WM_T_82543) {
   2509 		ifp->if_capabilities |=
   2510 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2511 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2512 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2513 		    IFCAP_CSUM_TCPv6_Tx |
   2514 		    IFCAP_CSUM_UDPv6_Tx;
   2515 	}
   2516 
   2517 	/*
   2518 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2519 	 *
   2520 	 *	82541GI (8086:1076) ... no
   2521 	 *	82572EI (8086:10b9) ... yes
   2522 	 */
   2523 	if (sc->sc_type >= WM_T_82571) {
   2524 		ifp->if_capabilities |=
   2525 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2526 	}
   2527 
   2528 	/*
   2529 	 * If we're a i82544 or greater (except i82547), we can do
   2530 	 * TCP segmentation offload.
   2531 	 */
   2532 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2533 		ifp->if_capabilities |= IFCAP_TSOv4;
   2534 	}
   2535 
   2536 	if (sc->sc_type >= WM_T_82571) {
   2537 		ifp->if_capabilities |= IFCAP_TSOv6;
   2538 	}
   2539 
   2540 #ifdef WM_MPSAFE
   2541 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2542 #else
   2543 	sc->sc_core_lock = NULL;
   2544 #endif
   2545 
   2546 	/* Attach the interface. */
   2547 	if_initialize(ifp);
   2548 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2549 	ether_ifattach(ifp, enaddr);
   2550 	if_register(ifp);
   2551 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2552 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2553 			  RND_FLAG_DEFAULT);
   2554 
   2555 #ifdef WM_EVENT_COUNTERS
   2556 	/* Attach event counters. */
   2557 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2558 	    NULL, xname, "linkintr");
   2559 
   2560 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2561 	    NULL, xname, "tx_xoff");
   2562 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2563 	    NULL, xname, "tx_xon");
   2564 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2565 	    NULL, xname, "rx_xoff");
   2566 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2567 	    NULL, xname, "rx_xon");
   2568 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2569 	    NULL, xname, "rx_macctl");
   2570 #endif /* WM_EVENT_COUNTERS */
   2571 
   2572 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2573 		pmf_class_network_register(self, ifp);
   2574 	else
   2575 		aprint_error_dev(self, "couldn't establish power handler\n");
   2576 
   2577 	sc->sc_flags |= WM_F_ATTACHED;
   2578  out:
   2579 	return;
   2580 }
   2581 
   2582 /* The detach function (ca_detach) */
   2583 static int
   2584 wm_detach(device_t self, int flags __unused)
   2585 {
   2586 	struct wm_softc *sc = device_private(self);
   2587 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2588 	int i;
   2589 
   2590 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2591 		return 0;
   2592 
    2593 	/* Stop the interface; callouts are stopped in wm_stop() */
   2594 	wm_stop(ifp, 1);
   2595 
   2596 	pmf_device_deregister(self);
   2597 
   2598 	/* Tell the firmware about the release */
   2599 	WM_CORE_LOCK(sc);
   2600 	wm_release_manageability(sc);
   2601 	wm_release_hw_control(sc);
   2602 	WM_CORE_UNLOCK(sc);
   2603 
   2604 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2605 
   2606 	/* Delete all remaining media. */
   2607 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2608 
   2609 	ether_ifdetach(ifp);
   2610 	if_detach(ifp);
   2611 	if_percpuq_destroy(sc->sc_ipq);
   2612 
   2613 	/* Unload RX dmamaps and free mbufs */
   2614 	for (i = 0; i < sc->sc_nqueues; i++) {
   2615 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2616 		mutex_enter(rxq->rxq_lock);
   2617 		wm_rxdrain(rxq);
   2618 		mutex_exit(rxq->rxq_lock);
   2619 	}
   2620 	/* Must unlock here */
   2621 
   2622 	/* Disestablish the interrupt handler */
   2623 	for (i = 0; i < sc->sc_nintrs; i++) {
   2624 		if (sc->sc_ihs[i] != NULL) {
   2625 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2626 			sc->sc_ihs[i] = NULL;
   2627 		}
   2628 	}
   2629 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2630 
   2631 	wm_free_txrx_queues(sc);
   2632 
   2633 	/* Unmap the registers */
   2634 	if (sc->sc_ss) {
   2635 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2636 		sc->sc_ss = 0;
   2637 	}
   2638 	if (sc->sc_ios) {
   2639 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2640 		sc->sc_ios = 0;
   2641 	}
   2642 	if (sc->sc_flashs) {
   2643 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2644 		sc->sc_flashs = 0;
   2645 	}
   2646 
   2647 	if (sc->sc_core_lock)
   2648 		mutex_obj_free(sc->sc_core_lock);
   2649 
   2650 	return 0;
   2651 }
   2652 
   2653 static bool
   2654 wm_suspend(device_t self, const pmf_qual_t *qual)
   2655 {
   2656 	struct wm_softc *sc = device_private(self);
   2657 
   2658 	wm_release_manageability(sc);
   2659 	wm_release_hw_control(sc);
   2660 #ifdef WM_WOL
   2661 	wm_enable_wakeup(sc);
   2662 #endif
   2663 
   2664 	return true;
   2665 }
   2666 
   2667 static bool
   2668 wm_resume(device_t self, const pmf_qual_t *qual)
   2669 {
   2670 	struct wm_softc *sc = device_private(self);
   2671 
   2672 	wm_init_manageability(sc);
   2673 
   2674 	return true;
   2675 }
   2676 
   2677 /*
   2678  * wm_watchdog:		[ifnet interface function]
   2679  *
   2680  *	Watchdog timer handler.
   2681  */
   2682 static void
   2683 wm_watchdog(struct ifnet *ifp)
   2684 {
   2685 	int qid;
   2686 	struct wm_softc *sc = ifp->if_softc;
   2687 
   2688 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2689 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2690 
   2691 		wm_watchdog_txq(ifp, txq);
   2692 	}
   2693 
   2694 	/* Reset the interface. */
   2695 	(void) wm_init(ifp);
   2696 
   2697 	/*
    2698 	 * There is still some upper-layer processing which calls
    2699 	 * ifp->if_start(), e.g. ALTQ.
   2700 	 */
   2701 	/* Try to get more packets going. */
   2702 	ifp->if_start(ifp);
   2703 }
   2704 
   2705 static void
   2706 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2707 {
   2708 	struct wm_softc *sc = ifp->if_softc;
   2709 
   2710 	/*
   2711 	 * Since we're using delayed interrupts, sweep up
   2712 	 * before we report an error.
   2713 	 */
   2714 	mutex_enter(txq->txq_lock);
   2715 	wm_txeof(sc, txq);
   2716 	mutex_exit(txq->txq_lock);
   2717 
   2718 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2719 #ifdef WM_DEBUG
   2720 		int i, j;
   2721 		struct wm_txsoft *txs;
   2722 #endif
   2723 		log(LOG_ERR,
   2724 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2725 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2726 		    txq->txq_next);
   2727 		ifp->if_oerrors++;
   2728 #ifdef WM_DEBUG
    2729 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2730 		    i = WM_NEXTTXS(txq, i)) {
    2731 			txs = &txq->txq_soft[i];
    2732 			printf("txs %d tx %d -> %d\n",
    2733 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2734 			for (j = txs->txs_firstdesc; ;
    2735 			    j = WM_NEXTTX(txq, j)) {
    2736 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2737 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2738 				printf("\t %#08x%08x\n",
    2739 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2740 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2741 				if (j == txs->txs_lastdesc)
    2742 					break;
    2743 			}
    2744 		}
   2745 #endif
   2746 	}
   2747 }
   2748 
   2749 /*
   2750  * wm_tick:
   2751  *
   2752  *	One second timer, used to check link status, sweep up
   2753  *	completed transmit jobs, etc.
   2754  */
   2755 static void
   2756 wm_tick(void *arg)
   2757 {
   2758 	struct wm_softc *sc = arg;
   2759 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2760 #ifndef WM_MPSAFE
   2761 	int s = splnet();
   2762 #endif
   2763 
   2764 	WM_CORE_LOCK(sc);
   2765 
   2766 	if (sc->sc_stopping)
   2767 		goto out;
   2768 
   2769 	if (sc->sc_type >= WM_T_82542_2_1) {
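         		/* These statistics registers are read-to-clear, so accumulate */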
   2770 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2771 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2772 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2773 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2774 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2775 	}
   2776 
   2777 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2778 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2779 	    + CSR_READ(sc, WMREG_CRCERRS)
   2780 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2781 	    + CSR_READ(sc, WMREG_SYMERRC)
   2782 	    + CSR_READ(sc, WMREG_RXERRC)
   2783 	    + CSR_READ(sc, WMREG_SEC)
   2784 	    + CSR_READ(sc, WMREG_CEXTERR)
   2785 	    + CSR_READ(sc, WMREG_RLEC);
   2786 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2787 
   2788 	if (sc->sc_flags & WM_F_HAS_MII)
   2789 		mii_tick(&sc->sc_mii);
   2790 	else if ((sc->sc_type >= WM_T_82575)
   2791 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2792 		wm_serdes_tick(sc);
   2793 	else
   2794 		wm_tbi_tick(sc);
   2795 
   2796 out:
   2797 	WM_CORE_UNLOCK(sc);
   2798 #ifndef WM_MPSAFE
   2799 	splx(s);
   2800 #endif
   2801 
   2802 	if (!sc->sc_stopping)
   2803 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2804 }
   2805 
   2806 static int
   2807 wm_ifflags_cb(struct ethercom *ec)
   2808 {
   2809 	struct ifnet *ifp = &ec->ec_if;
   2810 	struct wm_softc *sc = ifp->if_softc;
   2811 	int rc = 0;
   2812 
   2813 	WM_CORE_LOCK(sc);
   2814 
   2815 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2816 	sc->sc_if_flags = ifp->if_flags;
   2817 
   2818 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2819 		rc = ENETRESET;
   2820 		goto out;
   2821 	}
   2822 
   2823 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2824 		wm_set_filter(sc);
   2825 
   2826 	wm_set_vlan(sc);
   2827 
   2828 out:
   2829 	WM_CORE_UNLOCK(sc);
   2830 
   2831 	return rc;
   2832 }
   2833 
   2834 /*
   2835  * wm_ioctl:		[ifnet interface function]
   2836  *
   2837  *	Handle control requests from the operator.
   2838  */
   2839 static int
   2840 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2841 {
   2842 	struct wm_softc *sc = ifp->if_softc;
   2843 	struct ifreq *ifr = (struct ifreq *) data;
   2844 	struct ifaddr *ifa = (struct ifaddr *)data;
   2845 	struct sockaddr_dl *sdl;
   2846 	int s, error;
   2847 
   2848 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2849 		device_xname(sc->sc_dev), __func__));
   2850 
   2851 #ifndef WM_MPSAFE
   2852 	s = splnet();
   2853 #endif
   2854 	switch (cmd) {
   2855 	case SIOCSIFMEDIA:
   2856 	case SIOCGIFMEDIA:
   2857 		WM_CORE_LOCK(sc);
   2858 		/* Flow control requires full-duplex mode. */
   2859 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2860 		    (ifr->ifr_media & IFM_FDX) == 0)
   2861 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2862 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2863 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2864 				/* We can do both TXPAUSE and RXPAUSE. */
   2865 				ifr->ifr_media |=
   2866 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2867 			}
   2868 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2869 		}
   2870 		WM_CORE_UNLOCK(sc);
   2871 #ifdef WM_MPSAFE
   2872 		s = splnet();
   2873 #endif
   2874 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2875 #ifdef WM_MPSAFE
   2876 		splx(s);
   2877 #endif
   2878 		break;
   2879 	case SIOCINITIFADDR:
   2880 		WM_CORE_LOCK(sc);
   2881 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2882 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2883 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2884 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2885 			/* unicast address is first multicast entry */
   2886 			wm_set_filter(sc);
   2887 			error = 0;
   2888 			WM_CORE_UNLOCK(sc);
   2889 			break;
   2890 		}
   2891 		WM_CORE_UNLOCK(sc);
   2892 		/*FALLTHROUGH*/
   2893 	default:
   2894 #ifdef WM_MPSAFE
   2895 		s = splnet();
   2896 #endif
   2897 		/* It may call wm_start, so unlock here */
   2898 		error = ether_ioctl(ifp, cmd, data);
   2899 #ifdef WM_MPSAFE
   2900 		splx(s);
   2901 #endif
   2902 		if (error != ENETRESET)
   2903 			break;
   2904 
   2905 		error = 0;
   2906 
   2907 		if (cmd == SIOCSIFCAP) {
   2908 			error = (*ifp->if_init)(ifp);
   2909 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2910 			;
   2911 		else if (ifp->if_flags & IFF_RUNNING) {
   2912 			/*
   2913 			 * Multicast list has changed; set the hardware filter
   2914 			 * accordingly.
   2915 			 */
   2916 			WM_CORE_LOCK(sc);
   2917 			wm_set_filter(sc);
   2918 			WM_CORE_UNLOCK(sc);
   2919 		}
   2920 		break;
   2921 	}
   2922 
   2923 #ifndef WM_MPSAFE
   2924 	splx(s);
   2925 #endif
   2926 	return error;
   2927 }
   2928 
   2929 /* MAC address related */
   2930 
   2931 /*
   2932  * Get the offset of MAC address and return it.
   2933  * If error occured, use offset 0.
   2934  */
   2935 static uint16_t
   2936 wm_check_alt_mac_addr(struct wm_softc *sc)
   2937 {
   2938 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2939 	uint16_t offset = NVM_OFF_MACADDR;
   2940 
   2941 	/* Try to read alternative MAC address pointer */
   2942 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2943 		return 0;
   2944 
    2945 	/* Check whether the pointer is valid. */
   2946 	if ((offset == 0x0000) || (offset == 0xffff))
   2947 		return 0;
   2948 
   2949 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2950 	/*
    2951 	 * Check whether the alternative MAC address is valid.
    2952 	 * Some cards have a non-0xffff pointer but don't actually use
    2953 	 * an alternative MAC address.
    2954 	 *
    2955 	 * Check whether the broadcast bit is set.
   2956 	 */
   2957 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2958 		if (((myea[0] & 0xff) & 0x01) == 0)
   2959 			return offset; /* Found */
   2960 
   2961 	/* Not found */
   2962 	return 0;
   2963 }
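/*
 * Example with hypothetical NVM contents: if the word at
 * NVM_OFF_ALT_MAC_ADDR_PTR reads 0x0037, the candidate address for
 * function 1 starts at word 0x0037 + NVM_OFF_MACADDR_82571(1).  The
 * (myea[0] & 0xff) & 0x01 test above rejects entries whose first octet
 * has the group (multicast/broadcast) bit set, which a valid unicast
 * station address never has.
 */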
   2964 
   2965 static int
   2966 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2967 {
   2968 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2969 	uint16_t offset = NVM_OFF_MACADDR;
   2970 	int do_invert = 0;
   2971 
   2972 	switch (sc->sc_type) {
   2973 	case WM_T_82580:
   2974 	case WM_T_I350:
   2975 	case WM_T_I354:
   2976 		/* EEPROM Top Level Partitioning */
   2977 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2978 		break;
   2979 	case WM_T_82571:
   2980 	case WM_T_82575:
   2981 	case WM_T_82576:
   2982 	case WM_T_80003:
   2983 	case WM_T_I210:
   2984 	case WM_T_I211:
   2985 		offset = wm_check_alt_mac_addr(sc);
   2986 		if (offset == 0)
   2987 			if ((sc->sc_funcid & 0x01) == 1)
   2988 				do_invert = 1;
   2989 		break;
   2990 	default:
   2991 		if ((sc->sc_funcid & 0x01) == 1)
   2992 			do_invert = 1;
   2993 		break;
   2994 	}
   2995 
   2996 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2997 		myea) != 0)
   2998 		goto bad;
   2999 
   3000 	enaddr[0] = myea[0] & 0xff;
   3001 	enaddr[1] = myea[0] >> 8;
   3002 	enaddr[2] = myea[1] & 0xff;
   3003 	enaddr[3] = myea[1] >> 8;
   3004 	enaddr[4] = myea[2] & 0xff;
   3005 	enaddr[5] = myea[2] >> 8;
   3006 
   3007 	/*
   3008 	 * Toggle the LSB of the MAC address on the second port
   3009 	 * of some dual port cards.
   3010 	 */
   3011 	if (do_invert != 0)
   3012 		enaddr[5] ^= 1;
   3013 
   3014 	return 0;
   3015 
   3016  bad:
   3017 	return -1;
   3018 }
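/*
 * Byte-order sketch with hypothetical NVM contents: the station address
 * is stored as three little-endian 16-bit words, so
 * myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to 00:11:22:33:44:55,
 * each word contributing its low byte first.
 */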
   3019 
   3020 /*
   3021  * wm_set_ral:
   3022  *
    3023  *	Set an entry in the receive address list.
   3024  */
   3025 static void
   3026 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3027 {
   3028 	uint32_t ral_lo, ral_hi;
   3029 
   3030 	if (enaddr != NULL) {
   3031 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3032 		    (enaddr[3] << 24);
   3033 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3034 		ral_hi |= RAL_AV;
   3035 	} else {
   3036 		ral_lo = 0;
   3037 		ral_hi = 0;
   3038 	}
   3039 
   3040 	if (sc->sc_type >= WM_T_82544) {
   3041 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3042 		    ral_lo);
   3043 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3044 		    ral_hi);
   3045 	} else {
   3046 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3047 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3048 	}
   3049 }
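/*
 * Register-packing sketch for a hypothetical station address
 * 00:11:22:33:44:55:
 *	ral_lo = 0x33221100		(bytes 0..3, little-endian)
 *	ral_hi = 0x00005544 | RAL_AV	(bytes 4..5 plus Address Valid)
 */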
   3050 
   3051 /*
   3052  * wm_mchash:
   3053  *
   3054  *	Compute the hash of the multicast address for the 4096-bit
   3055  *	multicast filter.
   3056  */
   3057 static uint32_t
   3058 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3059 {
   3060 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3061 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3062 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3063 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3064 	uint32_t hash;
   3065 
   3066 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3067 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3068 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3069 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3070 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3071 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3072 		return (hash & 0x3ff);
   3073 	}
   3074 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3075 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3076 
   3077 	return (hash & 0xfff);
   3078 }
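/*
 * Worked example (non-ICH, sc_mchash_type == 0) for the hypothetical
 * multicast address 01:00:5e:00:00:01: enaddr[4] = 0x00 and
 * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
 * wm_set_filter() then derives reg = hash >> 5 = 0 and
 * bit = hash & 0x1f = 16, i.e. bit 16 of the first MTA word.
 */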
   3079 
   3080 /*
   3081  * wm_set_filter:
   3082  *
   3083  *	Set up the receive filter.
   3084  */
   3085 static void
   3086 wm_set_filter(struct wm_softc *sc)
   3087 {
   3088 	struct ethercom *ec = &sc->sc_ethercom;
   3089 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3090 	struct ether_multi *enm;
   3091 	struct ether_multistep step;
   3092 	bus_addr_t mta_reg;
   3093 	uint32_t hash, reg, bit;
   3094 	int i, size, ralmax;
   3095 
   3096 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3097 		device_xname(sc->sc_dev), __func__));
   3098 
   3099 	if (sc->sc_type >= WM_T_82544)
   3100 		mta_reg = WMREG_CORDOVA_MTA;
   3101 	else
   3102 		mta_reg = WMREG_MTA;
   3103 
   3104 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3105 
   3106 	if (ifp->if_flags & IFF_BROADCAST)
   3107 		sc->sc_rctl |= RCTL_BAM;
   3108 	if (ifp->if_flags & IFF_PROMISC) {
   3109 		sc->sc_rctl |= RCTL_UPE;
   3110 		goto allmulti;
   3111 	}
   3112 
   3113 	/*
   3114 	 * Set the station address in the first RAL slot, and
   3115 	 * clear the remaining slots.
   3116 	 */
   3117 	if (sc->sc_type == WM_T_ICH8)
    3118 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3119 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3120 	    || (sc->sc_type == WM_T_PCH))
   3121 		size = WM_RAL_TABSIZE_ICH8;
   3122 	else if (sc->sc_type == WM_T_PCH2)
   3123 		size = WM_RAL_TABSIZE_PCH2;
    3124 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3125 		size = WM_RAL_TABSIZE_PCH_LPT;
   3126 	else if (sc->sc_type == WM_T_82575)
   3127 		size = WM_RAL_TABSIZE_82575;
   3128 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3129 		size = WM_RAL_TABSIZE_82576;
   3130 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3131 		size = WM_RAL_TABSIZE_I350;
   3132 	else
   3133 		size = WM_RAL_TABSIZE;
   3134 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3135 
   3136 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3137 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3138 		switch (i) {
   3139 		case 0:
   3140 			/* We can use all entries */
   3141 			ralmax = size;
   3142 			break;
   3143 		case 1:
   3144 			/* Only RAR[0] */
   3145 			ralmax = 1;
   3146 			break;
   3147 		default:
   3148 			/* available SHRA + RAR[0] */
   3149 			ralmax = i + 1;
   3150 		}
   3151 	} else
   3152 		ralmax = size;
   3153 	for (i = 1; i < size; i++) {
   3154 		if (i < ralmax)
   3155 			wm_set_ral(sc, NULL, i);
   3156 	}
   3157 
   3158 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3159 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3160 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3161 	    || (sc->sc_type == WM_T_PCH_SPT))
   3162 		size = WM_ICH8_MC_TABSIZE;
   3163 	else
   3164 		size = WM_MC_TABSIZE;
   3165 	/* Clear out the multicast table. */
   3166 	for (i = 0; i < size; i++)
   3167 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3168 
   3169 	ETHER_FIRST_MULTI(step, ec, enm);
   3170 	while (enm != NULL) {
   3171 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3172 			/*
   3173 			 * We must listen to a range of multicast addresses.
   3174 			 * For now, just accept all multicasts, rather than
   3175 			 * trying to set only those filter bits needed to match
   3176 			 * the range.  (At this time, the only use of address
   3177 			 * ranges is for IP multicast routing, for which the
   3178 			 * range is big enough to require all bits set.)
   3179 			 */
   3180 			goto allmulti;
   3181 		}
   3182 
   3183 		hash = wm_mchash(sc, enm->enm_addrlo);
   3184 
   3185 		reg = (hash >> 5);
   3186 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3187 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3188 		    || (sc->sc_type == WM_T_PCH2)
   3189 		    || (sc->sc_type == WM_T_PCH_LPT)
   3190 		    || (sc->sc_type == WM_T_PCH_SPT))
   3191 			reg &= 0x1f;
   3192 		else
   3193 			reg &= 0x7f;
   3194 		bit = hash & 0x1f;
   3195 
   3196 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3197 		hash |= 1U << bit;
   3198 
   3199 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3200 			/*
    3201 			 * 82544 Errata 9: Certain registers cannot be written
   3202 			 * with particular alignments in PCI-X bus operation
   3203 			 * (FCAH, MTA and VFTA).
   3204 			 */
   3205 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3206 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3207 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3208 		} else
   3209 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3210 
   3211 		ETHER_NEXT_MULTI(step, enm);
   3212 	}
   3213 
   3214 	ifp->if_flags &= ~IFF_ALLMULTI;
   3215 	goto setit;
   3216 
   3217  allmulti:
   3218 	ifp->if_flags |= IFF_ALLMULTI;
   3219 	sc->sc_rctl |= RCTL_MPE;
   3220 
   3221  setit:
   3222 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3223 }
   3224 
   3225 /* Reset and init related */
   3226 
   3227 static void
   3228 wm_set_vlan(struct wm_softc *sc)
   3229 {
   3230 
   3231 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3232 		device_xname(sc->sc_dev), __func__));
   3233 
   3234 	/* Deal with VLAN enables. */
   3235 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3236 		sc->sc_ctrl |= CTRL_VME;
   3237 	else
   3238 		sc->sc_ctrl &= ~CTRL_VME;
   3239 
   3240 	/* Write the control registers. */
   3241 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3242 }
   3243 
   3244 static void
   3245 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3246 {
   3247 	uint32_t gcr;
   3248 	pcireg_t ctrl2;
   3249 
   3250 	gcr = CSR_READ(sc, WMREG_GCR);
   3251 
   3252 	/* Only take action if timeout value is defaulted to 0 */
   3253 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3254 		goto out;
   3255 
   3256 	if ((gcr & GCR_CAP_VER2) == 0) {
   3257 		gcr |= GCR_CMPL_TMOUT_10MS;
   3258 		goto out;
   3259 	}
   3260 
   3261 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3262 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3263 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3264 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3265 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3266 
   3267 out:
   3268 	/* Disable completion timeout resend */
   3269 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3270 
   3271 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3272 }
   3273 
   3274 void
   3275 wm_get_auto_rd_done(struct wm_softc *sc)
   3276 {
   3277 	int i;
   3278 
   3279 	/* wait for eeprom to reload */
   3280 	switch (sc->sc_type) {
   3281 	case WM_T_82571:
   3282 	case WM_T_82572:
   3283 	case WM_T_82573:
   3284 	case WM_T_82574:
   3285 	case WM_T_82583:
   3286 	case WM_T_82575:
   3287 	case WM_T_82576:
   3288 	case WM_T_82580:
   3289 	case WM_T_I350:
   3290 	case WM_T_I354:
   3291 	case WM_T_I210:
   3292 	case WM_T_I211:
   3293 	case WM_T_80003:
   3294 	case WM_T_ICH8:
   3295 	case WM_T_ICH9:
   3296 		for (i = 0; i < 10; i++) {
   3297 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3298 				break;
   3299 			delay(1000);
   3300 		}
   3301 		if (i == 10) {
   3302 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3303 			    "complete\n", device_xname(sc->sc_dev));
   3304 		}
   3305 		break;
   3306 	default:
   3307 		break;
   3308 	}
   3309 }
   3310 
   3311 void
   3312 wm_lan_init_done(struct wm_softc *sc)
   3313 {
   3314 	uint32_t reg = 0;
   3315 	int i;
   3316 
   3317 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3318 		device_xname(sc->sc_dev), __func__));
   3319 
   3320 	/* Wait for eeprom to reload */
   3321 	switch (sc->sc_type) {
   3322 	case WM_T_ICH10:
   3323 	case WM_T_PCH:
   3324 	case WM_T_PCH2:
   3325 	case WM_T_PCH_LPT:
   3326 	case WM_T_PCH_SPT:
   3327 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3328 			reg = CSR_READ(sc, WMREG_STATUS);
   3329 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3330 				break;
   3331 			delay(100);
   3332 		}
   3333 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3334 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3335 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3336 		}
   3337 		break;
   3338 	default:
   3339 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3340 		    __func__);
   3341 		break;
   3342 	}
   3343 
   3344 	reg &= ~STATUS_LAN_INIT_DONE;
   3345 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3346 }
   3347 
   3348 void
   3349 wm_get_cfg_done(struct wm_softc *sc)
   3350 {
   3351 	int mask;
   3352 	uint32_t reg;
   3353 	int i;
   3354 
   3355 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3356 		device_xname(sc->sc_dev), __func__));
   3357 
   3358 	/* Wait for eeprom to reload */
   3359 	switch (sc->sc_type) {
   3360 	case WM_T_82542_2_0:
   3361 	case WM_T_82542_2_1:
   3362 		/* null */
   3363 		break;
   3364 	case WM_T_82543:
   3365 	case WM_T_82544:
   3366 	case WM_T_82540:
   3367 	case WM_T_82545:
   3368 	case WM_T_82545_3:
   3369 	case WM_T_82546:
   3370 	case WM_T_82546_3:
   3371 	case WM_T_82541:
   3372 	case WM_T_82541_2:
   3373 	case WM_T_82547:
   3374 	case WM_T_82547_2:
   3375 	case WM_T_82573:
   3376 	case WM_T_82574:
   3377 	case WM_T_82583:
   3378 		/* generic */
   3379 		delay(10*1000);
   3380 		break;
   3381 	case WM_T_80003:
   3382 	case WM_T_82571:
   3383 	case WM_T_82572:
   3384 	case WM_T_82575:
   3385 	case WM_T_82576:
   3386 	case WM_T_82580:
   3387 	case WM_T_I350:
   3388 	case WM_T_I354:
   3389 	case WM_T_I210:
   3390 	case WM_T_I211:
   3391 		if (sc->sc_type == WM_T_82571) {
   3392 			/* Only 82571 shares port 0 */
   3393 			mask = EEMNGCTL_CFGDONE_0;
   3394 		} else
   3395 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3396 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3397 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3398 				break;
   3399 			delay(1000);
   3400 		}
   3401 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3402 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3403 				device_xname(sc->sc_dev), __func__));
   3404 		}
   3405 		break;
   3406 	case WM_T_ICH8:
   3407 	case WM_T_ICH9:
   3408 	case WM_T_ICH10:
   3409 	case WM_T_PCH:
   3410 	case WM_T_PCH2:
   3411 	case WM_T_PCH_LPT:
   3412 	case WM_T_PCH_SPT:
   3413 		delay(10*1000);
   3414 		if (sc->sc_type >= WM_T_ICH10)
   3415 			wm_lan_init_done(sc);
   3416 		else
   3417 			wm_get_auto_rd_done(sc);
   3418 
   3419 		reg = CSR_READ(sc, WMREG_STATUS);
   3420 		if ((reg & STATUS_PHYRA) != 0)
   3421 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3422 		break;
   3423 	default:
   3424 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3425 		    __func__);
   3426 		break;
   3427 	}
   3428 }
   3429 
   3430 /* Init hardware bits */
   3431 void
   3432 wm_initialize_hardware_bits(struct wm_softc *sc)
   3433 {
   3434 	uint32_t tarc0, tarc1, reg;
   3435 
   3436 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3437 		device_xname(sc->sc_dev), __func__));
   3438 
   3439 	/* For 82571 variant, 80003 and ICHs */
   3440 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3441 	    || (sc->sc_type >= WM_T_80003)) {
   3442 
   3443 		/* Transmit Descriptor Control 0 */
   3444 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3445 		reg |= TXDCTL_COUNT_DESC;
   3446 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3447 
   3448 		/* Transmit Descriptor Control 1 */
   3449 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3450 		reg |= TXDCTL_COUNT_DESC;
   3451 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3452 
   3453 		/* TARC0 */
   3454 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3455 		switch (sc->sc_type) {
   3456 		case WM_T_82571:
   3457 		case WM_T_82572:
   3458 		case WM_T_82573:
   3459 		case WM_T_82574:
   3460 		case WM_T_82583:
   3461 		case WM_T_80003:
   3462 			/* Clear bits 30..27 */
   3463 			tarc0 &= ~__BITS(30, 27);
   3464 			break;
   3465 		default:
   3466 			break;
   3467 		}
   3468 
   3469 		switch (sc->sc_type) {
   3470 		case WM_T_82571:
   3471 		case WM_T_82572:
   3472 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3473 
   3474 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3475 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3476 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3477 			/* 8257[12] Errata No.7 */
    3478 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3479 
   3480 			/* TARC1 bit 28 */
   3481 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3482 				tarc1 &= ~__BIT(28);
   3483 			else
   3484 				tarc1 |= __BIT(28);
   3485 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3486 
   3487 			/*
   3488 			 * 8257[12] Errata No.13
    3489 			 * Disable Dynamic Clock Gating.
   3490 			 */
   3491 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3492 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3493 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3494 			break;
   3495 		case WM_T_82573:
   3496 		case WM_T_82574:
   3497 		case WM_T_82583:
   3498 			if ((sc->sc_type == WM_T_82574)
   3499 			    || (sc->sc_type == WM_T_82583))
   3500 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3501 
   3502 			/* Extended Device Control */
   3503 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3504 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3505 			reg |= __BIT(22);	/* Set bit 22 */
   3506 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3507 
   3508 			/* Device Control */
   3509 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3510 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3511 
   3512 			/* PCIe Control Register */
   3513 			/*
   3514 			 * 82573 Errata (unknown).
   3515 			 *
   3516 			 * 82574 Errata 25 and 82583 Errata 12
   3517 			 * "Dropped Rx Packets":
    3518 			 *   NVM images version 2.1.4 and newer do not have this bug.
   3519 			 */
   3520 			reg = CSR_READ(sc, WMREG_GCR);
   3521 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3522 			CSR_WRITE(sc, WMREG_GCR, reg);
   3523 
   3524 			if ((sc->sc_type == WM_T_82574)
   3525 			    || (sc->sc_type == WM_T_82583)) {
   3526 				/*
   3527 				 * Document says this bit must be set for
   3528 				 * proper operation.
   3529 				 */
   3530 				reg = CSR_READ(sc, WMREG_GCR);
   3531 				reg |= __BIT(22);
   3532 				CSR_WRITE(sc, WMREG_GCR, reg);
   3533 
   3534 				/*
    3535 				 * Apply a workaround for the hardware errata
    3536 				 * documented in the errata docs.  This fixes
    3537 				 * an issue where some error-prone or
    3538 				 * unreliable PCIe completions occur,
    3539 				 * particularly with ASPM enabled.  Without
    3540 				 * the fix, the issue can cause Tx timeouts.
   3541 				 */
   3542 				reg = CSR_READ(sc, WMREG_GCR2);
   3543 				reg |= __BIT(0);
   3544 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3545 			}
   3546 			break;
   3547 		case WM_T_80003:
   3548 			/* TARC0 */
   3549 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3550 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3551 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3552 
   3553 			/* TARC1 bit 28 */
   3554 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3555 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3556 				tarc1 &= ~__BIT(28);
   3557 			else
   3558 				tarc1 |= __BIT(28);
   3559 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3560 			break;
   3561 		case WM_T_ICH8:
   3562 		case WM_T_ICH9:
   3563 		case WM_T_ICH10:
   3564 		case WM_T_PCH:
   3565 		case WM_T_PCH2:
   3566 		case WM_T_PCH_LPT:
   3567 		case WM_T_PCH_SPT:
   3568 			/* TARC0 */
   3569 			if ((sc->sc_type == WM_T_ICH8)
   3570 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3571 				/* Set TARC0 bits 29 and 28 */
   3572 				tarc0 |= __BITS(29, 28);
   3573 			}
   3574 			/* Set TARC0 bits 23,24,26,27 */
   3575 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3576 
   3577 			/* CTRL_EXT */
   3578 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3579 			reg |= __BIT(22);	/* Set bit 22 */
   3580 			/*
   3581 			 * Enable PHY low-power state when MAC is at D3
   3582 			 * w/o WoL
   3583 			 */
   3584 			if (sc->sc_type >= WM_T_PCH)
   3585 				reg |= CTRL_EXT_PHYPDEN;
   3586 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3587 
   3588 			/* TARC1 */
   3589 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3590 			/* bit 28 */
   3591 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3592 				tarc1 &= ~__BIT(28);
   3593 			else
   3594 				tarc1 |= __BIT(28);
   3595 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3596 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3597 
   3598 			/* Device Status */
   3599 			if (sc->sc_type == WM_T_ICH8) {
   3600 				reg = CSR_READ(sc, WMREG_STATUS);
   3601 				reg &= ~__BIT(31);
   3602 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3603 
   3604 			}
   3605 
   3606 			/* IOSFPC */
   3607 			if (sc->sc_type == WM_T_PCH_SPT) {
   3608 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3609 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3610 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3611 			}
   3612 			/*
    3613 			 * To work around a descriptor data corruption issue
    3614 			 * during NFS v2 UDP traffic, just disable the NFS
    3615 			 * filtering capability.
   3616 			 */
   3617 			reg = CSR_READ(sc, WMREG_RFCTL);
   3618 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3619 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3620 			break;
   3621 		default:
   3622 			break;
   3623 		}
   3624 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3625 
   3626 		/*
   3627 		 * 8257[12] Errata No.52 and some others.
   3628 		 * Avoid RSS Hash Value bug.
   3629 		 */
   3630 		switch (sc->sc_type) {
   3631 		case WM_T_82571:
   3632 		case WM_T_82572:
   3633 		case WM_T_82573:
   3634 		case WM_T_80003:
   3635 		case WM_T_ICH8:
   3636 			reg = CSR_READ(sc, WMREG_RFCTL);
    3637 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3638 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3639 			break;
   3640 		default:
   3641 			break;
   3642 		}
   3643 	}
   3644 }
   3645 
   3646 static uint32_t
   3647 wm_rxpbs_adjust_82580(uint32_t val)
   3648 {
   3649 	uint32_t rv = 0;
   3650 
   3651 	if (val < __arraycount(wm_82580_rxpbs_table))
   3652 		rv = wm_82580_rxpbs_table[val];
   3653 
   3654 	return rv;
   3655 }
   3656 
   3657 /*
   3658  * wm_reset:
   3659  *
   3660  *	Reset the i82542 chip.
   3661  */
   3662 static void
   3663 wm_reset(struct wm_softc *sc)
   3664 {
   3665 	int phy_reset = 0;
   3666 	int i, error = 0;
   3667 	uint32_t reg, mask;
   3668 
   3669 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3670 		device_xname(sc->sc_dev), __func__));
   3671 	KASSERT(sc->sc_type != 0);
   3672 
   3673 	/*
   3674 	 * Allocate on-chip memory according to the MTU size.
   3675 	 * The Packet Buffer Allocation register must be written
   3676 	 * before the chip is reset.
   3677 	 */
   3678 	switch (sc->sc_type) {
   3679 	case WM_T_82547:
   3680 	case WM_T_82547_2:
   3681 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3682 		    PBA_22K : PBA_30K;
   3683 		for (i = 0; i < sc->sc_nqueues; i++) {
   3684 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3685 			txq->txq_fifo_head = 0;
   3686 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3687 			txq->txq_fifo_size =
   3688 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3689 			txq->txq_fifo_stall = 0;
   3690 		}
   3691 		break;
   3692 	case WM_T_82571:
   3693 	case WM_T_82572:
    3694 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3695 	case WM_T_80003:
   3696 		sc->sc_pba = PBA_32K;
   3697 		break;
   3698 	case WM_T_82573:
   3699 		sc->sc_pba = PBA_12K;
   3700 		break;
   3701 	case WM_T_82574:
   3702 	case WM_T_82583:
   3703 		sc->sc_pba = PBA_20K;
   3704 		break;
   3705 	case WM_T_82576:
   3706 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3707 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3708 		break;
   3709 	case WM_T_82580:
   3710 	case WM_T_I350:
   3711 	case WM_T_I354:
   3712 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3713 		break;
   3714 	case WM_T_I210:
   3715 	case WM_T_I211:
   3716 		sc->sc_pba = PBA_34K;
   3717 		break;
   3718 	case WM_T_ICH8:
   3719 		/* Workaround for a bit corruption issue in FIFO memory */
   3720 		sc->sc_pba = PBA_8K;
   3721 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3722 		break;
   3723 	case WM_T_ICH9:
   3724 	case WM_T_ICH10:
   3725 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3726 		    PBA_14K : PBA_10K;
   3727 		break;
   3728 	case WM_T_PCH:
   3729 	case WM_T_PCH2:
   3730 	case WM_T_PCH_LPT:
   3731 	case WM_T_PCH_SPT:
   3732 		sc->sc_pba = PBA_26K;
   3733 		break;
   3734 	default:
   3735 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3736 		    PBA_40K : PBA_48K;
   3737 		break;
   3738 	}
   3739 	/*
    3740 	 * Only old or non-multiqueue devices have the PBA register.
   3741 	 * XXX Need special handling for 82575.
   3742 	 */
   3743 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3744 	    || (sc->sc_type == WM_T_82575))
   3745 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3746 
   3747 	/* Prevent the PCI-E bus from sticking */
   3748 	if (sc->sc_flags & WM_F_PCIE) {
   3749 		int timeout = 800;
   3750 
   3751 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3752 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3753 
   3754 		while (timeout--) {
   3755 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3756 			    == 0)
   3757 				break;
   3758 			delay(100);
   3759 		}
   3760 	}
   3761 
   3762 	/* Set the completion timeout for interface */
   3763 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3764 	    || (sc->sc_type == WM_T_82580)
   3765 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3766 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3767 		wm_set_pcie_completion_timeout(sc);
   3768 
   3769 	/* Clear interrupt */
   3770 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3771 	if (sc->sc_nintrs > 1) {
   3772 		if (sc->sc_type != WM_T_82574) {
   3773 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3774 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3775 		} else {
   3776 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3777 		}
   3778 	}
   3779 
   3780 	/* Stop the transmit and receive processes. */
   3781 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3782 	sc->sc_rctl &= ~RCTL_EN;
   3783 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3784 	CSR_WRITE_FLUSH(sc);
   3785 
   3786 	/* XXX set_tbi_sbp_82543() */
   3787 
   3788 	delay(10*1000);
   3789 
   3790 	/* Must acquire the MDIO ownership before MAC reset */
   3791 	switch (sc->sc_type) {
   3792 	case WM_T_82573:
   3793 	case WM_T_82574:
   3794 	case WM_T_82583:
   3795 		error = wm_get_hw_semaphore_82573(sc);
   3796 		break;
   3797 	default:
   3798 		break;
   3799 	}
   3800 
   3801 	/*
   3802 	 * 82541 Errata 29? & 82547 Errata 28?
   3803 	 * See also the description about PHY_RST bit in CTRL register
   3804 	 * in 8254x_GBe_SDM.pdf.
   3805 	 */
   3806 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3807 		CSR_WRITE(sc, WMREG_CTRL,
   3808 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3809 		CSR_WRITE_FLUSH(sc);
   3810 		delay(5000);
   3811 	}
   3812 
   3813 	switch (sc->sc_type) {
   3814 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3815 	case WM_T_82541:
   3816 	case WM_T_82541_2:
   3817 	case WM_T_82547:
   3818 	case WM_T_82547_2:
   3819 		/*
   3820 		 * On some chipsets, a reset through a memory-mapped write
   3821 		 * cycle can cause the chip to reset before completing the
   3822 		 * write cycle.  This causes major headache that can be
   3823 		 * avoided by issuing the reset via indirect register writes
   3824 		 * through I/O space.
   3825 		 *
   3826 		 * So, if we successfully mapped the I/O BAR at attach time,
   3827 		 * use that.  Otherwise, try our luck with a memory-mapped
   3828 		 * reset.
   3829 		 */
   3830 		if (sc->sc_flags & WM_F_IOH_VALID)
   3831 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3832 		else
   3833 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3834 		break;
   3835 	case WM_T_82545_3:
   3836 	case WM_T_82546_3:
   3837 		/* Use the shadow control register on these chips. */
   3838 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3839 		break;
   3840 	case WM_T_80003:
   3841 		mask = swfwphysem[sc->sc_funcid];
   3842 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3843 		wm_get_swfw_semaphore(sc, mask);
   3844 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3845 		wm_put_swfw_semaphore(sc, mask);
   3846 		break;
   3847 	case WM_T_ICH8:
   3848 	case WM_T_ICH9:
   3849 	case WM_T_ICH10:
   3850 	case WM_T_PCH:
   3851 	case WM_T_PCH2:
   3852 	case WM_T_PCH_LPT:
   3853 	case WM_T_PCH_SPT:
   3854 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3855 		if (wm_phy_resetisblocked(sc) == false) {
   3856 			/*
   3857 			 * Gate automatic PHY configuration by hardware on
   3858 			 * non-managed 82579
   3859 			 */
   3860 			if ((sc->sc_type == WM_T_PCH2)
   3861 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3862 				== 0))
   3863 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3864 
   3865 			reg |= CTRL_PHY_RESET;
   3866 			phy_reset = 1;
   3867 		} else
   3868 			printf("XXX reset is blocked!!!\n");
   3869 		wm_get_swfwhw_semaphore(sc);
   3870 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3871 		/* Don't insert a completion barrier when reset */
   3872 		delay(20*1000);
   3873 		wm_put_swfwhw_semaphore(sc);
   3874 		break;
   3875 	case WM_T_82580:
   3876 	case WM_T_I350:
   3877 	case WM_T_I354:
   3878 	case WM_T_I210:
   3879 	case WM_T_I211:
   3880 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3881 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3882 			CSR_WRITE_FLUSH(sc);
   3883 		delay(5000);
   3884 		break;
   3885 	case WM_T_82542_2_0:
   3886 	case WM_T_82542_2_1:
   3887 	case WM_T_82543:
   3888 	case WM_T_82540:
   3889 	case WM_T_82545:
   3890 	case WM_T_82546:
   3891 	case WM_T_82571:
   3892 	case WM_T_82572:
   3893 	case WM_T_82573:
   3894 	case WM_T_82574:
   3895 	case WM_T_82575:
   3896 	case WM_T_82576:
   3897 	case WM_T_82583:
   3898 	default:
   3899 		/* Everything else can safely use the documented method. */
   3900 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3901 		break;
   3902 	}
   3903 
   3904 	/* Must release the MDIO ownership after MAC reset */
   3905 	switch (sc->sc_type) {
   3906 	case WM_T_82573:
   3907 	case WM_T_82574:
   3908 	case WM_T_82583:
   3909 		if (error == 0)
   3910 			wm_put_hw_semaphore_82573(sc);
   3911 		break;
   3912 	default:
   3913 		break;
   3914 	}
   3915 
   3916 	if (phy_reset != 0) {
   3917 		wm_get_cfg_done(sc);
   3918 		delay(10 * 1000);
   3919 		if (sc->sc_type >= WM_T_PCH) {
   3920 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3921 			    BM_PORT_GEN_CFG);
   3922 			reg &= ~BM_WUC_HOST_WU_BIT;
   3923 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   3924 			    BM_PORT_GEN_CFG, reg);
   3925 		}
   3926 	}
   3927 
   3928 	/* reload EEPROM */
   3929 	switch (sc->sc_type) {
   3930 	case WM_T_82542_2_0:
   3931 	case WM_T_82542_2_1:
   3932 	case WM_T_82543:
   3933 	case WM_T_82544:
   3934 		delay(10);
   3935 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3936 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3937 		CSR_WRITE_FLUSH(sc);
   3938 		delay(2000);
   3939 		break;
   3940 	case WM_T_82540:
   3941 	case WM_T_82545:
   3942 	case WM_T_82545_3:
   3943 	case WM_T_82546:
   3944 	case WM_T_82546_3:
   3945 		delay(5*1000);
   3946 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3947 		break;
   3948 	case WM_T_82541:
   3949 	case WM_T_82541_2:
   3950 	case WM_T_82547:
   3951 	case WM_T_82547_2:
   3952 		delay(20000);
   3953 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3954 		break;
   3955 	case WM_T_82571:
   3956 	case WM_T_82572:
   3957 	case WM_T_82573:
   3958 	case WM_T_82574:
   3959 	case WM_T_82583:
   3960 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3961 			delay(10);
   3962 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3963 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3964 			CSR_WRITE_FLUSH(sc);
   3965 		}
   3966 		/* check EECD_EE_AUTORD */
   3967 		wm_get_auto_rd_done(sc);
   3968 		/*
   3969 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   3970 		 * is set.
   3971 		 */
   3972 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3973 		    || (sc->sc_type == WM_T_82583))
   3974 			delay(25*1000);
   3975 		break;
   3976 	case WM_T_82575:
   3977 	case WM_T_82576:
   3978 	case WM_T_82580:
   3979 	case WM_T_I350:
   3980 	case WM_T_I354:
   3981 	case WM_T_I210:
   3982 	case WM_T_I211:
   3983 	case WM_T_80003:
   3984 		/* check EECD_EE_AUTORD */
   3985 		wm_get_auto_rd_done(sc);
   3986 		break;
   3987 	case WM_T_ICH8:
   3988 	case WM_T_ICH9:
   3989 	case WM_T_ICH10:
   3990 	case WM_T_PCH:
   3991 	case WM_T_PCH2:
   3992 	case WM_T_PCH_LPT:
   3993 	case WM_T_PCH_SPT:
   3994 		break;
   3995 	default:
   3996 		panic("%s: unknown type\n", __func__);
   3997 	}
   3998 
   3999 	/* Check whether EEPROM is present or not */
   4000 	switch (sc->sc_type) {
   4001 	case WM_T_82575:
   4002 	case WM_T_82576:
   4003 	case WM_T_82580:
   4004 	case WM_T_I350:
   4005 	case WM_T_I354:
   4006 	case WM_T_ICH8:
   4007 	case WM_T_ICH9:
   4008 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4009 			/* Not found */
   4010 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4011 			if (sc->sc_type == WM_T_82575)
   4012 				wm_reset_init_script_82575(sc);
   4013 		}
   4014 		break;
   4015 	default:
   4016 		break;
   4017 	}
   4018 
   4019 	if ((sc->sc_type == WM_T_82580)
   4020 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4021 		/* clear global device reset status bit */
   4022 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4023 	}
   4024 
   4025 	/* Clear any pending interrupt events. */
   4026 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4027 	reg = CSR_READ(sc, WMREG_ICR);
   4028 	if (sc->sc_nintrs > 1) {
   4029 		if (sc->sc_type != WM_T_82574) {
   4030 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4031 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4032 		} else
   4033 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4034 	}
   4035 
   4036 	/* reload sc_ctrl */
   4037 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4038 
   4039 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4040 		wm_set_eee_i350(sc);
   4041 
   4042 	/* dummy read from WUC */
   4043 	if (sc->sc_type == WM_T_PCH)
   4044 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4045 	/*
   4046 	 * For PCH, this write will make sure that any noise will be detected
   4047 	 * as a CRC error and be dropped rather than show up as a bad packet
   4048 	 * to the DMA engine
   4049 	 */
   4050 	if (sc->sc_type == WM_T_PCH)
   4051 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4052 
   4053 	if (sc->sc_type >= WM_T_82544)
   4054 		CSR_WRITE(sc, WMREG_WUC, 0);
   4055 
   4056 	wm_reset_mdicnfg_82580(sc);
   4057 
   4058 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4059 		wm_pll_workaround_i210(sc);
   4060 }
   4061 
   4062 /*
   4063  * wm_add_rxbuf:
   4064  *
    4065  *	Add a receive buffer to the indicated descriptor.
   4066  */
   4067 static int
   4068 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4069 {
   4070 	struct wm_softc *sc = rxq->rxq_sc;
   4071 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4072 	struct mbuf *m;
   4073 	int error;
   4074 
   4075 	KASSERT(mutex_owned(rxq->rxq_lock));
   4076 
   4077 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4078 	if (m == NULL)
   4079 		return ENOBUFS;
   4080 
   4081 	MCLGET(m, M_DONTWAIT);
   4082 	if ((m->m_flags & M_EXT) == 0) {
   4083 		m_freem(m);
   4084 		return ENOBUFS;
   4085 	}
   4086 
   4087 	if (rxs->rxs_mbuf != NULL)
   4088 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4089 
   4090 	rxs->rxs_mbuf = m;
   4091 
   4092 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4093 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4094 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4095 	if (error) {
   4096 		/* XXX XXX XXX */
   4097 		aprint_error_dev(sc->sc_dev,
   4098 		    "unable to load rx DMA map %d, error = %d\n",
   4099 		    idx, error);
   4100 		panic("wm_add_rxbuf");
   4101 	}
   4102 
   4103 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4104 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4105 
   4106 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4107 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4108 			wm_init_rxdesc(rxq, idx);
   4109 	} else
   4110 		wm_init_rxdesc(rxq, idx);
   4111 
   4112 	return 0;
   4113 }
   4114 
   4115 /*
   4116  * wm_rxdrain:
   4117  *
   4118  *	Drain the receive queue.
   4119  */
   4120 static void
   4121 wm_rxdrain(struct wm_rxqueue *rxq)
   4122 {
   4123 	struct wm_softc *sc = rxq->rxq_sc;
   4124 	struct wm_rxsoft *rxs;
   4125 	int i;
   4126 
   4127 	KASSERT(mutex_owned(rxq->rxq_lock));
   4128 
   4129 	for (i = 0; i < WM_NRXDESC; i++) {
   4130 		rxs = &rxq->rxq_soft[i];
   4131 		if (rxs->rxs_mbuf != NULL) {
   4132 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4133 			m_freem(rxs->rxs_mbuf);
   4134 			rxs->rxs_mbuf = NULL;
   4135 		}
   4136 	}
   4137 }
   4138 
   4139 
   4140 /*
   4141  * XXX copy from FreeBSD's sys/net/rss_config.c
   4142  */
   4143 /*
   4144  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4145  * effectiveness may be limited by algorithm choice and available entropy
   4146  * during the boot.
   4147  *
   4148  * XXXRW: And that we don't randomize it yet!
   4149  *
   4150  * This is the default Microsoft RSS specification key which is also
   4151  * the Chelsio T5 firmware default key.
   4152  */
   4153 #define RSS_KEYSIZE 40
   4154 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4155 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4156 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4157 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4158 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4159 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4160 };
   4161 
   4162 /*
    4163  * The caller must pass an array of size sizeof(wm_rss_key).
    4164  *
    4165  * XXX
    4166  * As if_ixgbe may use this function, it should not be an
    4167  * if_wm-specific function.
   4168  */
   4169 static void
   4170 wm_rss_getkey(uint8_t *key)
   4171 {
   4172 
   4173 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4174 }
   4175 
   4176 /*
   4177  * Setup registers for RSS.
   4178  *
    4179  * XXX no VMDq support yet
   4180  */
   4181 static void
   4182 wm_init_rss(struct wm_softc *sc)
   4183 {
   4184 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4185 	int i;
   4186 
   4187 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4188 
   4189 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4190 		int qid, reta_ent;
   4191 
   4192 		qid  = i % sc->sc_nqueues;
    4193 		switch (sc->sc_type) {
   4194 		case WM_T_82574:
   4195 			reta_ent = __SHIFTIN(qid,
   4196 			    RETA_ENT_QINDEX_MASK_82574);
   4197 			break;
   4198 		case WM_T_82575:
   4199 			reta_ent = __SHIFTIN(qid,
   4200 			    RETA_ENT_QINDEX1_MASK_82575);
   4201 			break;
   4202 		default:
   4203 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4204 			break;
   4205 		}
   4206 
   4207 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4208 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4209 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4210 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4211 	}
   4212 
   4213 	wm_rss_getkey((uint8_t *)rss_key);
   4214 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4215 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4216 
   4217 	if (sc->sc_type == WM_T_82574)
   4218 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4219 	else
   4220 		mrqc = MRQC_ENABLE_RSS_MQ;
   4221 
   4222 	/* XXXX
    4223 	 * The same as FreeBSD's igb.
    4224 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4225 	 */
   4226 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4227 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4228 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4229 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4230 
   4231 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4232 }
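/*
 * Resulting table sketch: RETA entry i points at queue (i % sc_nqueues),
 * so with sc_nqueues == 4 the entries cycle 0,1,2,3,0,1,... and flows
 * are spread round-robin by their RSS hash.  Note that the key written
 * to the RSSRK registers is the fixed default above, not a per-boot
 * random key.
 */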
   4233 
   4234 /*
    4235  * Adjust the TX and RX queue numbers which the system actually uses.
    4236  *
    4237  * The numbers are affected by the following parameters:
    4238  *     - The number of hardware queues
   4239  *     - The number of MSI-X vectors (= "nvectors" argument)
   4240  *     - ncpu
   4241  */
   4242 static void
   4243 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4244 {
   4245 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4246 
   4247 	if (nvectors < 2) {
   4248 		sc->sc_nqueues = 1;
   4249 		return;
   4250 	}
   4251 
    4252 	switch (sc->sc_type) {
   4253 	case WM_T_82572:
   4254 		hw_ntxqueues = 2;
   4255 		hw_nrxqueues = 2;
   4256 		break;
   4257 	case WM_T_82574:
   4258 		hw_ntxqueues = 2;
   4259 		hw_nrxqueues = 2;
   4260 		break;
   4261 	case WM_T_82575:
   4262 		hw_ntxqueues = 4;
   4263 		hw_nrxqueues = 4;
   4264 		break;
   4265 	case WM_T_82576:
   4266 		hw_ntxqueues = 16;
   4267 		hw_nrxqueues = 16;
   4268 		break;
   4269 	case WM_T_82580:
   4270 	case WM_T_I350:
   4271 	case WM_T_I354:
   4272 		hw_ntxqueues = 8;
   4273 		hw_nrxqueues = 8;
   4274 		break;
   4275 	case WM_T_I210:
   4276 		hw_ntxqueues = 4;
   4277 		hw_nrxqueues = 4;
   4278 		break;
   4279 	case WM_T_I211:
   4280 		hw_ntxqueues = 2;
   4281 		hw_nrxqueues = 2;
   4282 		break;
   4283 		/*
    4284 		 * As the Ethernet controllers below do not support MSI-X,
    4285 		 * this driver does not use multiqueue on them.
   4286 		 *     - WM_T_80003
   4287 		 *     - WM_T_ICH8
   4288 		 *     - WM_T_ICH9
   4289 		 *     - WM_T_ICH10
   4290 		 *     - WM_T_PCH
   4291 		 *     - WM_T_PCH2
   4292 		 *     - WM_T_PCH_LPT
   4293 		 */
   4294 	default:
   4295 		hw_ntxqueues = 1;
   4296 		hw_nrxqueues = 1;
   4297 		break;
   4298 	}
   4299 
   4300 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4301 
   4302 	/*
    4303 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4304 	 * limit the number of queues actually used.
   4305 	 */
   4306 	if (nvectors < hw_nqueues + 1) {
   4307 		sc->sc_nqueues = nvectors - 1;
   4308 	} else {
   4309 		sc->sc_nqueues = hw_nqueues;
   4310 	}
   4311 
   4312 	/*
    4313 	 * As more queues than CPUs cannot improve scaling, we limit
    4314 	 * the number of queues actually used.
   4315 	 */
   4316 	if (ncpu < sc->sc_nqueues)
   4317 		sc->sc_nqueues = ncpu;
   4318 }
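/*
 * Worked example: an 82576 (16 hardware queue pairs) attached with
 * nvectors == 5 on an 8-CPU machine clamps hw_nqueues = 16 first to
 * nvectors - 1 = 4 (one vector is reserved for the link interrupt)
 * and then to ncpu if that is smaller, giving sc_nqueues == 4.
 */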
   4319 
   4320 /*
   4321  * Both single interrupt MSI and INTx can use this function.
   4322  */
   4323 static int
   4324 wm_setup_legacy(struct wm_softc *sc)
   4325 {
   4326 	pci_chipset_tag_t pc = sc->sc_pc;
   4327 	const char *intrstr = NULL;
   4328 	char intrbuf[PCI_INTRSTR_LEN];
   4329 	int error;
   4330 
   4331 	error = wm_alloc_txrx_queues(sc);
   4332 	if (error) {
   4333 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4334 		    error);
   4335 		return ENOMEM;
   4336 	}
   4337 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4338 	    sizeof(intrbuf));
   4339 #ifdef WM_MPSAFE
   4340 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4341 #endif
   4342 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4343 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4344 	if (sc->sc_ihs[0] == NULL) {
   4345 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4346 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4347 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4348 		return ENOMEM;
   4349 	}
   4350 
   4351 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4352 	sc->sc_nintrs = 1;
   4353 	return 0;
   4354 }
   4355 
   4356 static int
   4357 wm_setup_msix(struct wm_softc *sc)
   4358 {
   4359 	void *vih;
   4360 	kcpuset_t *affinity;
   4361 	int qidx, error, intr_idx, txrx_established;
   4362 	pci_chipset_tag_t pc = sc->sc_pc;
   4363 	const char *intrstr = NULL;
   4364 	char intrbuf[PCI_INTRSTR_LEN];
   4365 	char intr_xname[INTRDEVNAMEBUF];
   4366 
   4367 	if (sc->sc_nqueues < ncpu) {
   4368 		/*
   4369 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4370 		 * interrupts starts from CPU#1.
   4371 		 */
   4372 		sc->sc_affinity_offset = 1;
   4373 	} else {
   4374 		/*
    4375 		 * In this case, this device uses all CPUs, so we align the
    4376 		 * affinitized cpu_index with the MSI-X vector number.
   4377 		 */
   4378 		sc->sc_affinity_offset = 0;
   4379 	}
   4380 
   4381 	error = wm_alloc_txrx_queues(sc);
   4382 	if (error) {
   4383 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4384 		    error);
   4385 		return ENOMEM;
   4386 	}
   4387 
   4388 	kcpuset_create(&affinity, false);
   4389 	intr_idx = 0;
   4390 
   4391 	/*
   4392 	 * TX and RX
   4393 	 */
   4394 	txrx_established = 0;
   4395 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4396 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4397 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4398 
   4399 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4400 		    sizeof(intrbuf));
   4401 #ifdef WM_MPSAFE
   4402 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4403 		    PCI_INTR_MPSAFE, true);
   4404 #endif
   4405 		memset(intr_xname, 0, sizeof(intr_xname));
   4406 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4407 		    device_xname(sc->sc_dev), qidx);
   4408 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4409 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4410 		if (vih == NULL) {
   4411 			aprint_error_dev(sc->sc_dev,
   4412 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4413 			    intrstr ? " at " : "",
   4414 			    intrstr ? intrstr : "");
   4415 
   4416 			goto fail;
   4417 		}
   4418 		kcpuset_zero(affinity);
   4419 		/* Round-robin affinity */
   4420 		kcpuset_set(affinity, affinity_to);
   4421 		error = interrupt_distribute(vih, affinity, NULL);
   4422 		if (error == 0) {
   4423 			aprint_normal_dev(sc->sc_dev,
   4424 			    "for TX and RX interrupting at %s affinity to %u\n",
   4425 			    intrstr, affinity_to);
   4426 		} else {
   4427 			aprint_normal_dev(sc->sc_dev,
   4428 			    "for TX and RX interrupting at %s\n", intrstr);
   4429 		}
   4430 		sc->sc_ihs[intr_idx] = vih;
    4431 		wmq->wmq_id = qidx;
   4432 		wmq->wmq_intr_idx = intr_idx;
   4433 
   4434 		txrx_established++;
   4435 		intr_idx++;
   4436 	}
   4437 
   4438 	/*
   4439 	 * LINK
   4440 	 */
   4441 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4442 	    sizeof(intrbuf));
   4443 #ifdef WM_MPSAFE
   4444 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4445 #endif
   4446 	memset(intr_xname, 0, sizeof(intr_xname));
   4447 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4448 	    device_xname(sc->sc_dev));
   4449 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4450 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4451 	if (vih == NULL) {
   4452 		aprint_error_dev(sc->sc_dev,
   4453 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4454 		    intrstr ? " at " : "",
   4455 		    intrstr ? intrstr : "");
   4456 
   4457 		goto fail;
   4458 	}
   4459 	/* keep default affinity to LINK interrupt */
   4460 	aprint_normal_dev(sc->sc_dev,
   4461 	    "for LINK interrupting at %s\n", intrstr);
   4462 	sc->sc_ihs[intr_idx] = vih;
   4463 	sc->sc_link_intr_idx = intr_idx;
   4464 
   4465 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4466 	kcpuset_destroy(affinity);
   4467 	return 0;
   4468 
   4469  fail:
   4470 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4471 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    4472 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4473 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4474 	}
   4475 
   4476 	kcpuset_destroy(affinity);
   4477 	return ENOMEM;
   4478 }
   4479 
   4480 /*
   4481  * wm_init:		[ifnet interface function]
   4482  *
   4483  *	Initialize the interface.
   4484  */
   4485 static int
   4486 wm_init(struct ifnet *ifp)
   4487 {
   4488 	struct wm_softc *sc = ifp->if_softc;
   4489 	int ret;
   4490 
   4491 	WM_CORE_LOCK(sc);
   4492 	ret = wm_init_locked(ifp);
   4493 	WM_CORE_UNLOCK(sc);
   4494 
   4495 	return ret;
   4496 }
   4497 
   4498 static int
   4499 wm_init_locked(struct ifnet *ifp)
   4500 {
   4501 	struct wm_softc *sc = ifp->if_softc;
   4502 	int i, j, trynum, error = 0;
   4503 	uint32_t reg;
   4504 
   4505 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4506 		device_xname(sc->sc_dev), __func__));
   4507 	KASSERT(WM_CORE_LOCKED(sc));
   4508 
   4509 	/*
    4510 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4511 	 * There is a small but measurable benefit to avoiding the adjustment
   4512 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4513 	 * on such platforms.  One possibility is that the DMA itself is
   4514 	 * slightly more efficient if the front of the entire packet (instead
   4515 	 * of the front of the headers) is aligned.
   4516 	 *
   4517 	 * Note we must always set align_tweak to 0 if we are using
   4518 	 * jumbo frames.
   4519 	 */
   4520 #ifdef __NO_STRICT_ALIGNMENT
   4521 	sc->sc_align_tweak = 0;
   4522 #else
   4523 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4524 		sc->sc_align_tweak = 0;
   4525 	else
   4526 		sc->sc_align_tweak = 2;
   4527 #endif /* __NO_STRICT_ALIGNMENT */
   4528 
   4529 	/* Cancel any pending I/O. */
   4530 	wm_stop_locked(ifp, 0);
   4531 
   4532 	/* update statistics before reset */
   4533 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4534 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4535 
   4536 	/* Reset the chip to a known state. */
   4537 	wm_reset(sc);
   4538 
   4539 	switch (sc->sc_type) {
   4540 	case WM_T_82571:
   4541 	case WM_T_82572:
   4542 	case WM_T_82573:
   4543 	case WM_T_82574:
   4544 	case WM_T_82583:
   4545 	case WM_T_80003:
   4546 	case WM_T_ICH8:
   4547 	case WM_T_ICH9:
   4548 	case WM_T_ICH10:
   4549 	case WM_T_PCH:
   4550 	case WM_T_PCH2:
   4551 	case WM_T_PCH_LPT:
   4552 	case WM_T_PCH_SPT:
   4553 		/* AMT based hardware can now take control from firmware */
   4554 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4555 			wm_get_hw_control(sc);
   4556 		break;
   4557 	default:
   4558 		break;
   4559 	}
   4560 
   4561 	/* Init hardware bits */
   4562 	wm_initialize_hardware_bits(sc);
   4563 
   4564 	/* Reset the PHY. */
   4565 	if (sc->sc_flags & WM_F_HAS_MII)
   4566 		wm_gmii_reset(sc);
   4567 
   4568 	/* Calculate (E)ITR value */
   4569 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4570 		sc->sc_itr = 450;	/* For EITR */
   4571 	} else if (sc->sc_type >= WM_T_82543) {
   4572 		/*
   4573 		 * Set up the interrupt throttling register (units of 256ns)
   4574 		 * Note that a footnote in Intel's documentation says this
   4575 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    4576 		 * or 10Mbit mode.  Empirically, the same appears to be true
    4577 		 * for the 1024ns units of the other
   4578 		 * interrupt-related timer registers -- so, really, we ought
   4579 		 * to divide this value by 4 when the link speed is low.
   4580 		 *
   4581 		 * XXX implement this division at link speed change!
   4582 		 */
   4583 
   4584 		/*
   4585 		 * For N interrupts/sec, set this value to:
   4586 		 * 1000000000 / (N * 256).  Note that we set the
   4587 		 * absolute and packet timer values to this value
   4588 		 * divided by 4 to get "simple timer" behavior.
   4589 		 */
   4590 
   4591 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4592 	}
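	/*
	 * Arithmetic check of the comment above: with sc_itr == 1500
	 * (units of 256ns), N = 1000000000 / (1500 * 256) ~= 2604
	 * interrupts/sec.
	 */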
   4593 
   4594 	error = wm_init_txrx_queues(sc);
   4595 	if (error)
   4596 		goto out;
   4597 
   4598 	/*
   4599 	 * Clear out the VLAN table -- we don't use it (yet).
   4600 	 */
   4601 	CSR_WRITE(sc, WMREG_VET, 0);
   4602 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4603 		trynum = 10; /* Due to hw errata */
   4604 	else
   4605 		trynum = 1;
   4606 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4607 		for (j = 0; j < trynum; j++)
   4608 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4609 
   4610 	/*
   4611 	 * Set up flow-control parameters.
   4612 	 *
   4613 	 * XXX Values could probably stand some tuning.
   4614 	 */
   4615 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4616 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4617 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4618 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4619 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4620 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4621 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4622 	}
   4623 
   4624 	sc->sc_fcrtl = FCRTL_DFLT;
   4625 	if (sc->sc_type < WM_T_82543) {
   4626 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4627 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4628 	} else {
   4629 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4630 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4631 	}
   4632 
   4633 	if (sc->sc_type == WM_T_80003)
   4634 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4635 	else
   4636 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4637 
   4638 	/* Writes the control register. */
   4639 	wm_set_vlan(sc);
   4640 
   4641 	if (sc->sc_flags & WM_F_HAS_MII) {
   4642 		int val;
   4643 
   4644 		switch (sc->sc_type) {
   4645 		case WM_T_80003:
   4646 		case WM_T_ICH8:
   4647 		case WM_T_ICH9:
   4648 		case WM_T_ICH10:
   4649 		case WM_T_PCH:
   4650 		case WM_T_PCH2:
   4651 		case WM_T_PCH_LPT:
   4652 		case WM_T_PCH_SPT:
   4653 			/*
   4654 			 * Set the mac to wait the maximum time between each
   4655 			 * iteration and increase the max iterations when
   4656 			 * polling the phy; this fixes erroneous timeouts at
   4657 			 * 10Mbps.
   4658 			 */
   4659 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4660 			    0xFFFF);
   4661 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4662 			val |= 0x3F;
   4663 			wm_kmrn_writereg(sc,
   4664 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4665 			break;
   4666 		default:
   4667 			break;
   4668 		}
   4669 
   4670 		if (sc->sc_type == WM_T_80003) {
   4671 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4672 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4673 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4674 
   4675 			/* Bypass RX and TX FIFOs */
   4676 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4677 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4678 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4679 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4680 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4681 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4682 		}
   4683 	}
   4684 #if 0
   4685 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4686 #endif
   4687 
   4688 	/* Set up checksum offload parameters. */
   4689 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4690 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4691 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4692 		reg |= RXCSUM_IPOFL;
   4693 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4694 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4695 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4696 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4697 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4698 
   4699 	/* Set up MSI-X */
   4700 	if (sc->sc_nintrs > 1) {
   4701 		uint32_t ivar;
   4702 		struct wm_queue *wmq;
   4703 		int qid, qintr_idx;
   4704 
   4705 		if (sc->sc_type == WM_T_82575) {
   4706 			/* Interrupt control */
   4707 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4708 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4709 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4710 
   4711 			/* TX and RX */
   4712 			for (i = 0; i < sc->sc_nqueues; i++) {
   4713 				wmq = &sc->sc_queue[i];
   4714 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4715 				    EITR_TX_QUEUE(wmq->wmq_id)
   4716 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4717 			}
   4718 			/* Link status */
   4719 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4720 			    EITR_OTHER);
   4721 		} else if (sc->sc_type == WM_T_82574) {
   4722 			/* Interrupt control */
   4723 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4724 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4725 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4726 
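        			/*
        			 * On the 82574 a single IVAR register holds all
        			 * of the cause-to-vector mappings; each field is
        			 * the MSI-X vector number OR'd with a valid bit
        			 * (IVAR_VALID_82574), accumulated below and
        			 * written in one go.
        			 */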
   4727 			ivar = 0;
   4728 			/* TX and RX */
   4729 			for (i = 0; i < sc->sc_nqueues; i++) {
   4730 				wmq = &sc->sc_queue[i];
   4731 				qid = wmq->wmq_id;
   4732 				qintr_idx = wmq->wmq_intr_idx;
   4733 
   4734 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4735 				    IVAR_TX_MASK_Q_82574(qid));
   4736 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4737 				    IVAR_RX_MASK_Q_82574(qid));
   4738 			}
   4739 			/* Link status */
   4740 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4741 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4742 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4743 		} else {
   4744 			/* Interrupt control */
   4745 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4746 			    | GPIE_EIAME | GPIE_PBA);
   4747 
   4748 			switch (sc->sc_type) {
   4749 			case WM_T_82580:
   4750 			case WM_T_I350:
   4751 			case WM_T_I354:
   4752 			case WM_T_I210:
   4753 			case WM_T_I211:
   4754 				/* TX and RX */
   4755 				for (i = 0; i < sc->sc_nqueues; i++) {
   4756 					wmq = &sc->sc_queue[i];
   4757 					qid = wmq->wmq_id;
   4758 					qintr_idx = wmq->wmq_intr_idx;
   4759 
   4760 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4761 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4762 					ivar |= __SHIFTIN((qintr_idx
   4763 						| IVAR_VALID),
   4764 					    IVAR_TX_MASK_Q(qid));
   4765 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4766 					ivar |= __SHIFTIN((qintr_idx
   4767 						| IVAR_VALID),
   4768 					    IVAR_RX_MASK_Q(qid));
   4769 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4770 				}
   4771 				break;
   4772 			case WM_T_82576:
   4773 				/* TX and RX */
   4774 				for (i = 0; i < sc->sc_nqueues; i++) {
   4775 					wmq = &sc->sc_queue[i];
   4776 					qid = wmq->wmq_id;
   4777 					qintr_idx = wmq->wmq_intr_idx;
   4778 
   4779 					ivar = CSR_READ(sc,
   4780 					    WMREG_IVAR_Q_82576(qid));
   4781 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4782 					ivar |= __SHIFTIN((qintr_idx
   4783 						| IVAR_VALID),
   4784 					    IVAR_TX_MASK_Q_82576(qid));
   4785 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4786 					ivar |= __SHIFTIN((qintr_idx
   4787 						| IVAR_VALID),
   4788 					    IVAR_RX_MASK_Q_82576(qid));
   4789 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4790 					    ivar);
   4791 				}
   4792 				break;
   4793 			default:
   4794 				break;
   4795 			}
   4796 
   4797 			/* Link status */
   4798 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4799 			    IVAR_MISC_OTHER);
   4800 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4801 		}
   4802 
   4803 		if (sc->sc_nqueues > 1) {
   4804 			wm_init_rss(sc);
   4805 
   4806 			/*
   4807 			 * NOTE: Receive Full-Packet Checksum Offload
   4808 			 * is mutually exclusive with Multiqueue; however,
   4809 			 * this is not the same as the TCP/IP checksums,
   4810 			 * which still work.
   4811 			 */
   4812 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4813 			reg |= RXCSUM_PCSD;
   4814 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4815 		}
   4816 	}
   4817 
   4818 	/* Set up the interrupt registers. */
   4819 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4820 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4821 	    ICR_RXO | ICR_RXT0;
   4822 	if (sc->sc_nintrs > 1) {
   4823 		uint32_t mask;
   4824 		struct wm_queue *wmq;
   4825 
   4826 		switch (sc->sc_type) {
   4827 		case WM_T_82574:
   4828 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4829 			    WMREG_EIAC_82574_MSIX_MASK);
   4830 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4831 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4832 			break;
   4833 		default:
   4834 			if (sc->sc_type == WM_T_82575) {
   4835 				mask = 0;
   4836 				for (i = 0; i < sc->sc_nqueues; i++) {
   4837 					wmq = &sc->sc_queue[i];
   4838 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   4839 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   4840 				}
   4841 				mask |= EITR_OTHER;
   4842 			} else {
   4843 				mask = 0;
   4844 				for (i = 0; i < sc->sc_nqueues; i++) {
   4845 					wmq = &sc->sc_queue[i];
   4846 					mask |= 1 << wmq->wmq_intr_idx;
   4847 				}
   4848 				mask |= 1 << sc->sc_link_intr_idx;
   4849 			}
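        			/*
        			 * Roughly: EIAC selects which extended causes
        			 * auto-clear on EICR read, EIAM which ones are
        			 * auto-masked, and EIMS enables them; ICR_LSC
        			 * stays in IMS for the link interrupt.  (Summary
        			 * only; see the Intel datasheets for per-device
        			 * details.)
        			 */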
   4850 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4851 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4852 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4853 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4854 			break;
   4855 		}
   4856 	} else
   4857 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4858 
   4859 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4860 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4861 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4862 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4863 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4864 		reg |= KABGTXD_BGSQLBIAS;
   4865 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4866 	}
   4867 
   4868 	/* Set up the inter-packet gap. */
   4869 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4870 
   4871 	if (sc->sc_type >= WM_T_82543) {
   4872 		/*
   4873 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
   4874 		 * the multi queue function with MSI-X.
   4875 		 */
   4876 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4877 			int qidx;
   4878 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4879 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   4880 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   4881 				    sc->sc_itr);
   4882 			}
   4883 			/*
   4884 			 * Link interrupts occur much less frequently than
   4885 			 * TX and RX interrupts, so we don't tune the
   4886 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
   4887 			 * FreeBSD's if_igb does.
   4888 			 */
   4889 		} else
   4890 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4891 	}
   4892 
   4893 	/* Set the VLAN ethernetype. */
   4894 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4895 
   4896 	/*
   4897 	 * Set up the transmit control register; we start out with
   4898 	 * a collision distance suitable for FDX, but update it when
   4899 	 * we resolve the media type.
   4900 	 */
   4901 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4902 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4903 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4904 	if (sc->sc_type >= WM_T_82571)
   4905 		sc->sc_tctl |= TCTL_MULR;
   4906 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4907 
   4908 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4909 		/* Write TDT after TCTL.EN is set. See the document. */
   4910 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4911 	}
   4912 
   4913 	if (sc->sc_type == WM_T_80003) {
   4914 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4915 		reg &= ~TCTL_EXT_GCEX_MASK;
   4916 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4917 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4918 	}
   4919 
   4920 	/* Set the media. */
   4921 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4922 		goto out;
   4923 
   4924 	/* Configure for OS presence */
   4925 	wm_init_manageability(sc);
   4926 
   4927 	/*
   4928 	 * Set up the receive control register; we actually program
   4929 	 * the register when we set the receive filter.  Use multicast
   4930 	 * address offset type 0.
   4931 	 *
   4932 	 * Only the i82544 has the ability to strip the incoming
   4933 	 * CRC, so we don't enable that feature.
   4934 	 */
   4935 	sc->sc_mchash_type = 0;
   4936 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4937 	    | RCTL_MO(sc->sc_mchash_type);
   4938 
   4939 	/*
   4940 	 * The I350 has a bug where it always strips the CRC whether
   4941 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   4942 	 */
   4943 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4944 	    || (sc->sc_type == WM_T_I210))
   4945 		sc->sc_rctl |= RCTL_SECRC;
   4946 
   4947 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4948 	    && (ifp->if_mtu > ETHERMTU)) {
   4949 		sc->sc_rctl |= RCTL_LPE;
   4950 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4951 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4952 	}
   4953 
   4954 	if (MCLBYTES == 2048) {
   4955 		sc->sc_rctl |= RCTL_2k;
   4956 	} else {
   4957 		if (sc->sc_type >= WM_T_82543) {
   4958 			switch (MCLBYTES) {
   4959 			case 4096:
   4960 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4961 				break;
   4962 			case 8192:
   4963 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4964 				break;
   4965 			case 16384:
   4966 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4967 				break;
   4968 			default:
   4969 				panic("wm_init: MCLBYTES %d unsupported",
   4970 				    MCLBYTES);
   4971 				break;
   4972 			}
   4973 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   4974 	}
   4975 
   4976 	/* Set the receive filter. */
   4977 	wm_set_filter(sc);
   4978 
   4979 	/* Enable ECC */
   4980 	switch (sc->sc_type) {
   4981 	case WM_T_82571:
   4982 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4983 		reg |= PBA_ECC_CORR_EN;
   4984 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4985 		break;
   4986 	case WM_T_PCH_LPT:
   4987 	case WM_T_PCH_SPT:
   4988 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4989 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4990 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4991 
   4992 		reg = CSR_READ(sc, WMREG_CTRL);
   4993 		reg |= CTRL_MEHE;
   4994 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4995 		break;
   4996 	default:
   4997 		break;
   4998 	}
   4999 
   5000 	/* On 82575 and later, set RDT only if RX is enabled */
   5001 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5002 		int qidx;
   5003 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5004 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5005 			for (i = 0; i < WM_NRXDESC; i++) {
   5006 				mutex_enter(rxq->rxq_lock);
   5007 				wm_init_rxdesc(rxq, i);
   5008 				mutex_exit(rxq->rxq_lock);
   5010 			}
   5011 		}
   5012 	}
   5013 
   5014 	sc->sc_stopping = false;
   5015 
   5016 	/* Start the one second link check clock. */
   5017 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5018 
   5019 	/* ...all done! */
   5020 	ifp->if_flags |= IFF_RUNNING;
   5021 	ifp->if_flags &= ~IFF_OACTIVE;
   5022 
   5023  out:
   5024 	sc->sc_if_flags = ifp->if_flags;
   5025 	if (error)
   5026 		log(LOG_ERR, "%s: interface not running\n",
   5027 		    device_xname(sc->sc_dev));
   5028 	return error;
   5029 }
   5030 
   5031 /*
   5032  * wm_stop:		[ifnet interface function]
   5033  *
   5034  *	Stop transmission on the interface.
   5035  */
   5036 static void
   5037 wm_stop(struct ifnet *ifp, int disable)
   5038 {
   5039 	struct wm_softc *sc = ifp->if_softc;
   5040 
   5041 	WM_CORE_LOCK(sc);
   5042 	wm_stop_locked(ifp, disable);
   5043 	WM_CORE_UNLOCK(sc);
   5044 }
   5045 
   5046 static void
   5047 wm_stop_locked(struct ifnet *ifp, int disable)
   5048 {
   5049 	struct wm_softc *sc = ifp->if_softc;
   5050 	struct wm_txsoft *txs;
   5051 	int i, qidx;
   5052 
   5053 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5054 		device_xname(sc->sc_dev), __func__));
   5055 	KASSERT(WM_CORE_LOCKED(sc));
   5056 
   5057 	sc->sc_stopping = true;
   5058 
   5059 	/* Stop the one second clock. */
   5060 	callout_stop(&sc->sc_tick_ch);
   5061 
   5062 	/* Stop the 82547 Tx FIFO stall check timer. */
   5063 	if (sc->sc_type == WM_T_82547)
   5064 		callout_stop(&sc->sc_txfifo_ch);
   5065 
   5066 	if (sc->sc_flags & WM_F_HAS_MII) {
   5067 		/* Down the MII. */
   5068 		mii_down(&sc->sc_mii);
   5069 	} else {
   5070 #if 0
   5071 		/* Should we clear PHY's status properly? */
   5072 		wm_reset(sc);
   5073 #endif
   5074 	}
   5075 
   5076 	/* Stop the transmit and receive processes. */
   5077 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5078 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5079 	sc->sc_rctl &= ~RCTL_EN;
   5080 
   5081 	/*
   5082 	 * Clear the interrupt mask to ensure the device cannot assert its
   5083 	 * interrupt line.
   5084 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5085 	 * service any currently pending or shared interrupt.
   5086 	 */
   5087 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5088 	sc->sc_icr = 0;
   5089 	if (sc->sc_nintrs > 1) {
   5090 		if (sc->sc_type != WM_T_82574) {
   5091 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5092 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5093 		} else
   5094 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5095 	}
   5096 
   5097 	/* Release any queued transmit buffers. */
   5098 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5099 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5100 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5101 		mutex_enter(txq->txq_lock);
   5102 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5103 			txs = &txq->txq_soft[i];
   5104 			if (txs->txs_mbuf != NULL) {
   5105 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5106 				m_freem(txs->txs_mbuf);
   5107 				txs->txs_mbuf = NULL;
   5108 			}
   5109 		}
   5110 		if (sc->sc_type == WM_T_PCH_SPT) {
   5111 			pcireg_t preg;
   5112 			uint32_t reg;
   5113 			int nexttx;
   5114 
   5115 			/* First, disable MULR fix in FEXTNVM11 */
   5116 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5117 			reg |= FEXTNVM11_DIS_MULRFIX;
   5118 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5119 
   5120 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5121 			    WM_PCI_DESCRING_STATUS);
   5122 			reg = CSR_READ(sc, WMREG_TDLEN(0));
   5123 			printf("XXX RST: FLUSH = %08x, len = %u\n",
   5124 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
   5125 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
   5126 			    && (reg != 0)) {
   5127 				/* TX */
   5128 				printf("XXX need TX flush (reg = %08x)\n",
   5129 				    preg);
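        				/*
        				 * Flush sketch: re-init the ring, queue a
        				 * single dummy 512-byte descriptor (IFCS
        				 * only), then pulse TCTL.EN so the pending
        				 * descriptor-ring flush request can
        				 * complete.
        				 */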
   5130 				wm_init_tx_descs(sc, txq);
   5131 				wm_init_tx_regs(sc, wmq, txq);
   5132 				nexttx = txq->txq_next;
   5133 				wm_set_dma_addr(
   5134 					&txq->txq_descs[nexttx].wtx_addr,
   5135 					WM_CDTXADDR(txq, nexttx));
   5136 				txq->txq_descs[nexttx].wtx_cmdlen
   5137 				    = htole32(WTX_CMD_IFCS | 512);
   5138 				wm_cdtxsync(txq, nexttx, 1,
   5139 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
   5140 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5141 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5142 				CSR_WRITE_FLUSH(sc);
   5143 				delay(250);
   5144 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5145 			}
   5146 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5147 			    WM_PCI_DESCRING_STATUS);
   5148 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5149 				/* RX */
   5150 				printf("XXX need RX flush\n");
   5151 			}
   5152 		}
   5153 		mutex_exit(txq->txq_lock);
   5154 	}
   5155 
   5156 	/* Mark the interface as down and cancel the watchdog timer. */
   5157 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5158 	ifp->if_timer = 0;
   5159 
   5160 	if (disable) {
   5161 		for (i = 0; i < sc->sc_nqueues; i++) {
   5162 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5163 			mutex_enter(rxq->rxq_lock);
   5164 			wm_rxdrain(rxq);
   5165 			mutex_exit(rxq->rxq_lock);
   5166 		}
   5167 	}
   5168 
   5169 #if 0 /* notyet */
   5170 	if (sc->sc_type >= WM_T_82544)
   5171 		CSR_WRITE(sc, WMREG_WUC, 0);
   5172 #endif
   5173 }
   5174 
   5175 static void
   5176 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5177 {
   5178 	struct mbuf *m;
   5179 	int i;
   5180 
   5181 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5182 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5183 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5184 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5185 		    m->m_data, m->m_len, m->m_flags);
   5186 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5187 	    i, i == 1 ? "" : "s");
   5188 }
   5189 
   5190 /*
   5191  * wm_82547_txfifo_stall:
   5192  *
   5193  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5194  *	reset the FIFO pointers, and restart packet transmission.
   5195  */
   5196 static void
   5197 wm_82547_txfifo_stall(void *arg)
   5198 {
   5199 	struct wm_softc *sc = arg;
   5200 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5201 
   5202 	mutex_enter(txq->txq_lock);
   5203 
   5204 	if (sc->sc_stopping)
   5205 		goto out;
   5206 
   5207 	if (txq->txq_fifo_stall) {
   5208 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5209 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5210 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5211 			/*
   5212 			 * Packets have drained.  Stop transmitter, reset
   5213 			 * FIFO pointers, restart transmitter, and kick
   5214 			 * the packet queue.
   5215 			 */
   5216 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5217 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5218 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5219 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5220 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5221 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5222 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5223 			CSR_WRITE_FLUSH(sc);
   5224 
   5225 			txq->txq_fifo_head = 0;
   5226 			txq->txq_fifo_stall = 0;
   5227 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5228 		} else {
   5229 			/*
   5230 			 * Still waiting for packets to drain; try again in
   5231 			 * another tick.
   5232 			 */
   5233 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5234 		}
   5235 	}
   5236 
   5237 out:
   5238 	mutex_exit(txq->txq_lock);
   5239 }
   5240 
   5241 /*
   5242  * wm_82547_txfifo_bugchk:
   5243  *
   5244  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5245  *	prevent enqueueing a packet that would wrap around the end
   5246  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5247  *
   5248  *	We do this by checking the amount of space before the end
   5249  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5250  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5251  *	the internal FIFO pointers to the beginning, and restart
   5252  *	transmission on the interface.
   5253  */
   5254 #define	WM_FIFO_HDR		0x10
   5255 #define	WM_82547_PAD_LEN	0x3e0
   5256 static int
   5257 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5258 {
   5259 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5260 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5261 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
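        	/*
        	 * e.g. for a 1514-byte frame: roundup(1514 + 0x10, 0x10) =
        	 * 1536 bytes of FIFO space (payload plus in-FIFO header,
        	 * padded to a 16-byte boundary).
        	 */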
   5262 
   5263 	/* Just return if already stalled. */
   5264 	if (txq->txq_fifo_stall)
   5265 		return 1;
   5266 
   5267 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5268 		/* Stall only occurs in half-duplex mode. */
   5269 		goto send_packet;
   5270 	}
   5271 
   5272 	if (len >= WM_82547_PAD_LEN + space) {
   5273 		txq->txq_fifo_stall = 1;
   5274 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5275 		return 1;
   5276 	}
   5277 
   5278  send_packet:
   5279 	txq->txq_fifo_head += len;
   5280 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5281 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5282 
   5283 	return 0;
   5284 }
   5285 
   5286 static int
   5287 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5288 {
   5289 	int error;
   5290 
   5291 	/*
   5292 	 * Allocate the control data structures, and create and load the
   5293 	 * DMA map for it.
   5294 	 *
   5295 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5296 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5297 	 * both sets within the same 4G segment.
   5298 	 */
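        	/*
        	 * The 4G constraint is enforced below via the boundary
        	 * argument to bus_dmamem_alloc() (0x100000000), which keeps
        	 * the allocation from crossing a 4GB boundary.
        	 */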
   5299 	if (sc->sc_type < WM_T_82544)
   5300 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5301 	else
   5302 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5303 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5304 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5305 	else
   5306 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5307 
   5308 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5309 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5310 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5311 		aprint_error_dev(sc->sc_dev,
   5312 		    "unable to allocate TX control data, error = %d\n",
   5313 		    error);
   5314 		goto fail_0;
   5315 	}
   5316 
   5317 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5318 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5319 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5320 		aprint_error_dev(sc->sc_dev,
   5321 		    "unable to map TX control data, error = %d\n", error);
   5322 		goto fail_1;
   5323 	}
   5324 
   5325 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5326 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5327 		aprint_error_dev(sc->sc_dev,
   5328 		    "unable to create TX control data DMA map, error = %d\n",
   5329 		    error);
   5330 		goto fail_2;
   5331 	}
   5332 
   5333 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5334 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5335 		aprint_error_dev(sc->sc_dev,
   5336 		    "unable to load TX control data DMA map, error = %d\n",
   5337 		    error);
   5338 		goto fail_3;
   5339 	}
   5340 
   5341 	return 0;
   5342 
   5343  fail_3:
   5344 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5345  fail_2:
   5346 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5347 	    WM_TXDESCS_SIZE(txq));
   5348  fail_1:
   5349 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5350  fail_0:
   5351 	return error;
   5352 }
   5353 
   5354 static void
   5355 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5356 {
   5357 
   5358 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5359 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5360 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5361 	    WM_TXDESCS_SIZE(txq));
   5362 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5363 }
   5364 
   5365 static int
   5366 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5367 {
   5368 	int error;
   5369 
   5370 	/*
   5371 	 * Allocate the control data structures, and create and load the
   5372 	 * DMA map for it.
   5373 	 *
   5374 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5375 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5376 	 * both sets within the same 4G segment.
   5377 	 */
   5378 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5379 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5380 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5381 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5382 		aprint_error_dev(sc->sc_dev,
   5383 		    "unable to allocate RX control data, error = %d\n",
   5384 		    error);
   5385 		goto fail_0;
   5386 	}
   5387 
   5388 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5389 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5390 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5391 		aprint_error_dev(sc->sc_dev,
   5392 		    "unable to map RX control data, error = %d\n", error);
   5393 		goto fail_1;
   5394 	}
   5395 
   5396 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5397 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5398 		aprint_error_dev(sc->sc_dev,
   5399 		    "unable to create RX control data DMA map, error = %d\n",
   5400 		    error);
   5401 		goto fail_2;
   5402 	}
   5403 
   5404 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5405 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5406 		aprint_error_dev(sc->sc_dev,
   5407 		    "unable to load RX control data DMA map, error = %d\n",
   5408 		    error);
   5409 		goto fail_3;
   5410 	}
   5411 
   5412 	return 0;
   5413 
   5414  fail_3:
   5415 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5416  fail_2:
   5417 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5418 	    rxq->rxq_desc_size);
   5419  fail_1:
   5420 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5421  fail_0:
   5422 	return error;
   5423 }
   5424 
   5425 static void
   5426 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5427 {
   5428 
   5429 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5430 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5431 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5432 	    rxq->rxq_desc_size);
   5433 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5434 }
   5435 
   5436 
   5437 static int
   5438 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5439 {
   5440 	int i, error;
   5441 
   5442 	/* Create the transmit buffer DMA maps. */
   5443 	WM_TXQUEUELEN(txq) =
   5444 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5445 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5446 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5447 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5448 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5449 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5450 			aprint_error_dev(sc->sc_dev,
   5451 			    "unable to create Tx DMA map %d, error = %d\n",
   5452 			    i, error);
   5453 			goto fail;
   5454 		}
   5455 	}
   5456 
   5457 	return 0;
   5458 
   5459  fail:
   5460 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5461 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5462 			bus_dmamap_destroy(sc->sc_dmat,
   5463 			    txq->txq_soft[i].txs_dmamap);
   5464 	}
   5465 	return error;
   5466 }
   5467 
   5468 static void
   5469 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5470 {
   5471 	int i;
   5472 
   5473 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5474 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5475 			bus_dmamap_destroy(sc->sc_dmat,
   5476 			    txq->txq_soft[i].txs_dmamap);
   5477 	}
   5478 }
   5479 
   5480 static int
   5481 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5482 {
   5483 	int i, error;
   5484 
   5485 	/* Create the receive buffer DMA maps. */
   5486 	for (i = 0; i < WM_NRXDESC; i++) {
   5487 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5488 			    MCLBYTES, 0, 0,
   5489 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5490 			aprint_error_dev(sc->sc_dev,
   5491 			    "unable to create Rx DMA map %d error = %d\n",
   5492 			    i, error);
   5493 			goto fail;
   5494 		}
   5495 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5496 	}
   5497 
   5498 	return 0;
   5499 
   5500  fail:
   5501 	for (i = 0; i < WM_NRXDESC; i++) {
   5502 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5503 			bus_dmamap_destroy(sc->sc_dmat,
   5504 			    rxq->rxq_soft[i].rxs_dmamap);
   5505 	}
   5506 	return error;
   5507 }
   5508 
   5509 static void
   5510 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5511 {
   5512 	int i;
   5513 
   5514 	for (i = 0; i < WM_NRXDESC; i++) {
   5515 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5516 			bus_dmamap_destroy(sc->sc_dmat,
   5517 			    rxq->rxq_soft[i].rxs_dmamap);
   5518 	}
   5519 }
   5520 
   5521 /*
   5522  * wm_alloc_txrx_queues:
   5523  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5524  */
   5525 static int
   5526 wm_alloc_txrx_queues(struct wm_softc *sc)
   5527 {
   5528 	int i, error, tx_done, rx_done;
   5529 
   5530 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5531 	    KM_SLEEP);
   5532 	if (sc->sc_queue == NULL) {
   5533 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5534 		error = ENOMEM;
   5535 		goto fail_0;
   5536 	}
   5537 
   5538 	/*
   5539 	 * For transmission
   5540 	 */
   5541 	error = 0;
   5542 	tx_done = 0;
   5543 	for (i = 0; i < sc->sc_nqueues; i++) {
   5544 #ifdef WM_EVENT_COUNTERS
   5545 		int j;
   5546 		const char *xname;
   5547 #endif
   5548 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5549 		txq->txq_sc = sc;
   5550 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5551 
   5552 		error = wm_alloc_tx_descs(sc, txq);
   5553 		if (error)
   5554 			break;
   5555 		error = wm_alloc_tx_buffer(sc, txq);
   5556 		if (error) {
   5557 			wm_free_tx_descs(sc, txq);
   5558 			break;
   5559 		}
   5560 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5561 		if (txq->txq_interq == NULL) {
   5562 			wm_free_tx_descs(sc, txq);
   5563 			wm_free_tx_buffer(sc, txq);
   5564 			error = ENOMEM;
   5565 			break;
   5566 		}
   5567 
   5568 #ifdef WM_EVENT_COUNTERS
   5569 		xname = device_xname(sc->sc_dev);
   5570 
   5571 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5572 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5573 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5574 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5575 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5576 
   5577 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5578 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5579 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5580 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5581 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5582 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5583 
   5584 		for (j = 0; j < WM_NTXSEGS; j++) {
   5585 			snprintf(txq->txq_txseg_evcnt_names[j],
   5586 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5587 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5588 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5589 		}
   5590 
   5591 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5592 
   5593 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5594 #endif /* WM_EVENT_COUNTERS */
   5595 
   5596 		tx_done++;
   5597 	}
   5598 	if (error)
   5599 		goto fail_1;
   5600 
   5601 	/*
   5602 	 * For receive
   5603 	 */
   5604 	error = 0;
   5605 	rx_done = 0;
   5606 	for (i = 0; i < sc->sc_nqueues; i++) {
   5607 #ifdef WM_EVENT_COUNTERS
   5608 		const char *xname;
   5609 #endif
   5610 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5611 		rxq->rxq_sc = sc;
   5612 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5613 
   5614 		error = wm_alloc_rx_descs(sc, rxq);
   5615 		if (error)
   5616 			break;
   5617 
   5618 		error = wm_alloc_rx_buffer(sc, rxq);
   5619 		if (error) {
   5620 			wm_free_rx_descs(sc, rxq);
   5621 			break;
   5622 		}
   5623 
   5624 #ifdef WM_EVENT_COUNTERS
   5625 		xname = device_xname(sc->sc_dev);
   5626 
   5627 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5628 
   5629 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5630 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5631 #endif /* WM_EVENT_COUNTERS */
   5632 
   5633 		rx_done++;
   5634 	}
   5635 	if (error)
   5636 		goto fail_2;
   5637 
   5638 	return 0;
   5639 
   5640  fail_2:
   5641 	for (i = 0; i < rx_done; i++) {
   5642 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5643 		wm_free_rx_buffer(sc, rxq);
   5644 		wm_free_rx_descs(sc, rxq);
   5645 		if (rxq->rxq_lock)
   5646 			mutex_obj_free(rxq->rxq_lock);
   5647 	}
   5648  fail_1:
   5649 	for (i = 0; i < tx_done; i++) {
   5650 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5651 		pcq_destroy(txq->txq_interq);
   5652 		wm_free_tx_buffer(sc, txq);
   5653 		wm_free_tx_descs(sc, txq);
   5654 		if (txq->txq_lock)
   5655 			mutex_obj_free(txq->txq_lock);
   5656 	}
   5657 
   5658 	kmem_free(sc->sc_queue,
   5659 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5660  fail_0:
   5661 	return error;
   5662 }
   5663 
   5664 /*
   5665  * wm_free_txrx_queues:
   5666  *	Free {tx,rx}descs and {tx,rx} buffers
   5667  */
   5668 static void
   5669 wm_free_txrx_queues(struct wm_softc *sc)
   5670 {
   5671 	int i;
   5672 
   5673 	for (i = 0; i < sc->sc_nqueues; i++) {
   5674 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5675 		wm_free_rx_buffer(sc, rxq);
   5676 		wm_free_rx_descs(sc, rxq);
   5677 		if (rxq->rxq_lock)
   5678 			mutex_obj_free(rxq->rxq_lock);
   5679 	}
   5680 
   5681 	for (i = 0; i < sc->sc_nqueues; i++) {
   5682 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5683 		wm_free_tx_buffer(sc, txq);
   5684 		wm_free_tx_descs(sc, txq);
   5685 		if (txq->txq_lock)
   5686 			mutex_obj_free(txq->txq_lock);
   5687 	}
   5688 
   5689 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5690 }
   5691 
   5692 static void
   5693 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5694 {
   5695 
   5696 	KASSERT(mutex_owned(txq->txq_lock));
   5697 
   5698 	/* Initialize the transmit descriptor ring. */
   5699 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5700 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5701 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5702 	txq->txq_free = WM_NTXDESC(txq);
   5703 	txq->txq_next = 0;
   5704 }
   5705 
   5706 static void
   5707 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5708     struct wm_txqueue *txq)
   5709 {
   5710 
   5711 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5712 		device_xname(sc->sc_dev), __func__));
   5713 	KASSERT(mutex_owned(txq->txq_lock));
   5714 
   5715 	if (sc->sc_type < WM_T_82543) {
   5716 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5717 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5718 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5719 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5720 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5721 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5722 	} else {
   5723 		int qid = wmq->wmq_id;
   5724 
   5725 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5726 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5727 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5728 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5729 
   5730 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5731 			/*
   5732 			 * Don't write TDT before TCTL.EN is set.
   5733 			 * See the document.
   5734 			 */
   5735 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5736 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5737 			    | TXDCTL_WTHRESH(0));
   5738 		else {
   5739 			/* TIDV is in 1024ns units; ITR is in 256ns: ITR/4 */
   5740 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5741 			if (sc->sc_type >= WM_T_82540) {
   5742 				/* TADV should be set to the same value */
   5743 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5744 			}
   5745 
   5746 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5747 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5748 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5749 		}
   5750 	}
   5751 }
   5752 
   5753 static void
   5754 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5755 {
   5756 	int i;
   5757 
   5758 	KASSERT(mutex_owned(txq->txq_lock));
   5759 
   5760 	/* Initialize the transmit job descriptors. */
   5761 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5762 		txq->txq_soft[i].txs_mbuf = NULL;
   5763 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5764 	txq->txq_snext = 0;
   5765 	txq->txq_sdirty = 0;
   5766 }
   5767 
   5768 static void
   5769 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5770     struct wm_txqueue *txq)
   5771 {
   5772 
   5773 	KASSERT(mutex_owned(txq->txq_lock));
   5774 
   5775 	/*
   5776 	 * Set up some register offsets that are different between
   5777 	 * the i82542 and the i82543 and later chips.
   5778 	 */
   5779 	if (sc->sc_type < WM_T_82543)
   5780 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5781 	else
   5782 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5783 
   5784 	wm_init_tx_descs(sc, txq);
   5785 	wm_init_tx_regs(sc, wmq, txq);
   5786 	wm_init_tx_buffer(sc, txq);
   5787 }
   5788 
   5789 static void
   5790 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5791     struct wm_rxqueue *rxq)
   5792 {
   5793 
   5794 	KASSERT(mutex_owned(rxq->rxq_lock));
   5795 
   5796 	/*
   5797 	 * Initialize the receive descriptor and receive job
   5798 	 * descriptor rings.
   5799 	 */
   5800 	if (sc->sc_type < WM_T_82543) {
   5801 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5802 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5803 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5804 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5805 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5806 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5807 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5808 
   5809 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5810 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5811 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5812 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5813 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5814 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5815 	} else {
   5816 		int qid = wmq->wmq_id;
   5817 
   5818 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5819 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5820 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5821 
   5822 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5823 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   5824 				panic("%s: MCLBYTES %d unsupported for "
        				    "82575 or higher\n", __func__, MCLBYTES);
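        			/*
        			 * SRRCTL's BSIZEPKT field is in units of
        			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
        			 * shift below and the size check above.
        			 */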
   5825 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5826 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5827 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5828 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5829 			    | RXDCTL_WTHRESH(1));
   5830 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5831 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5832 		} else {
   5833 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5834 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5835 			/* RDTR is in 1024ns units; ITR is in 256ns: ITR/4 */
   5836 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   5837 			/* RADV MUST be set to the same value */
   5838 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5839 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5840 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5841 		}
   5842 	}
   5843 }
   5844 
   5845 static int
   5846 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5847 {
   5848 	struct wm_rxsoft *rxs;
   5849 	int error, i;
   5850 
   5851 	KASSERT(mutex_owned(rxq->rxq_lock));
   5852 
   5853 	for (i = 0; i < WM_NRXDESC; i++) {
   5854 		rxs = &rxq->rxq_soft[i];
   5855 		if (rxs->rxs_mbuf == NULL) {
   5856 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5857 				log(LOG_ERR, "%s: unable to allocate or map "
   5858 				    "rx buffer %d, error = %d\n",
   5859 				    device_xname(sc->sc_dev), i, error);
   5860 				/*
   5861 				 * XXX Should attempt to run with fewer receive
   5862 				 * XXX buffers instead of just failing.
   5863 				 */
   5864 				wm_rxdrain(rxq);
   5865 				return ENOMEM;
   5866 			}
   5867 		} else {
   5868 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5869 				wm_init_rxdesc(rxq, i);
   5870 			/*
   5871 			 * For 82575 and newer devices, the RX descriptors
   5872 			 * must be initialized after the setting of RCTL.EN in
   5873 			 * wm_set_filter()
   5874 			 */
   5875 		}
   5876 	}
   5877 	rxq->rxq_ptr = 0;
   5878 	rxq->rxq_discard = 0;
   5879 	WM_RXCHAIN_RESET(rxq);
   5880 
   5881 	return 0;
   5882 }
   5883 
   5884 static int
   5885 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5886     struct wm_rxqueue *rxq)
   5887 {
   5888 
   5889 	KASSERT(mutex_owned(rxq->rxq_lock));
   5890 
   5891 	/*
   5892 	 * Set up some register offsets that are different between
   5893 	 * the i82542 and the i82543 and later chips.
   5894 	 */
   5895 	if (sc->sc_type < WM_T_82543)
   5896 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5897 	else
   5898 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   5899 
   5900 	wm_init_rx_regs(sc, wmq, rxq);
   5901 	return wm_init_rx_buffer(sc, rxq);
   5902 }
   5903 
   5904 /*
   5905  * wm_init_txrx_queues:
   5906  *	Initialize {tx,rx}descs and {tx,rx} buffers
   5907  */
   5908 static int
   5909 wm_init_txrx_queues(struct wm_softc *sc)
   5910 {
   5911 	int i, error = 0;
   5912 
   5913 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5914 		device_xname(sc->sc_dev), __func__));
   5915 
   5916 	for (i = 0; i < sc->sc_nqueues; i++) {
   5917 		struct wm_queue *wmq = &sc->sc_queue[i];
   5918 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5919 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5920 
   5921 		mutex_enter(txq->txq_lock);
   5922 		wm_init_tx_queue(sc, wmq, txq);
   5923 		mutex_exit(txq->txq_lock);
   5924 
   5925 		mutex_enter(rxq->rxq_lock);
   5926 		error = wm_init_rx_queue(sc, wmq, rxq);
   5927 		mutex_exit(rxq->rxq_lock);
   5928 		if (error)
   5929 			break;
   5930 	}
   5931 
   5932 	return error;
   5933 }
   5934 
   5935 /*
   5936  * wm_tx_offload:
   5937  *
   5938  *	Set up TCP/IP checksumming parameters for the
   5939  *	specified packet.
   5940  */
   5941 static int
   5942 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5943     uint8_t *fieldsp)
   5944 {
   5945 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5946 	struct mbuf *m0 = txs->txs_mbuf;
   5947 	struct livengood_tcpip_ctxdesc *t;
   5948 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5949 	uint32_t ipcse;
   5950 	struct ether_header *eh;
   5951 	int offset, iphl;
   5952 	uint8_t fields;
   5953 
   5954 	/*
   5955 	 * XXX It would be nice if the mbuf pkthdr had offset
   5956 	 * fields for the protocol headers.
   5957 	 */
   5958 
   5959 	eh = mtod(m0, struct ether_header *);
   5960 	switch (htons(eh->ether_type)) {
   5961 	case ETHERTYPE_IP:
   5962 	case ETHERTYPE_IPV6:
   5963 		offset = ETHER_HDR_LEN;
   5964 		break;
   5965 
   5966 	case ETHERTYPE_VLAN:
   5967 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5968 		break;
   5969 
   5970 	default:
   5971 		/*
   5972 		 * Don't support this protocol or encapsulation.
   5973 		 */
   5974 		*fieldsp = 0;
   5975 		*cmdp = 0;
   5976 		return 0;
   5977 	}
   5978 
   5979 	if ((m0->m_pkthdr.csum_flags &
   5980 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   5981 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5982 	} else {
   5983 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5984 	}
   5985 	ipcse = offset + iphl - 1;
   5986 
   5987 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   5988 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   5989 	seg = 0;
   5990 	fields = 0;
   5991 
   5992 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5993 		int hlen = offset + iphl;
   5994 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
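        		/*
        		 * For TSO the controller segments the packet itself,
        		 * so zero the IP total-length/payload-length field and
        		 * seed the TCP checksum with the pseudo-header sum
        		 * computed over a zero length; the hardware fills in
        		 * the per-segment lengths and completes the checksum.
        		 */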
   5995 
   5996 		if (__predict_false(m0->m_len <
   5997 				    (hlen + sizeof(struct tcphdr)))) {
   5998 			/*
   5999 			 * TCP/IP headers are not in the first mbuf; we need
   6000 			 * to do this the slow and painful way.  Let's just
   6001 			 * hope this doesn't happen very often.
   6002 			 */
   6003 			struct tcphdr th;
   6004 
   6005 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6006 
   6007 			m_copydata(m0, hlen, sizeof(th), &th);
   6008 			if (v4) {
   6009 				struct ip ip;
   6010 
   6011 				m_copydata(m0, offset, sizeof(ip), &ip);
   6012 				ip.ip_len = 0;
   6013 				m_copyback(m0,
   6014 				    offset + offsetof(struct ip, ip_len),
   6015 				    sizeof(ip.ip_len), &ip.ip_len);
   6016 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6017 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6018 			} else {
   6019 				struct ip6_hdr ip6;
   6020 
   6021 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6022 				ip6.ip6_plen = 0;
   6023 				m_copyback(m0,
   6024 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6025 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6026 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6027 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6028 			}
   6029 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6030 			    sizeof(th.th_sum), &th.th_sum);
   6031 
   6032 			hlen += th.th_off << 2;
   6033 		} else {
   6034 			/*
   6035 			 * TCP/IP headers are in the first mbuf; we can do
   6036 			 * this the easy way.
   6037 			 */
   6038 			struct tcphdr *th;
   6039 
   6040 			if (v4) {
   6041 				struct ip *ip =
   6042 				    (void *)(mtod(m0, char *) + offset);
   6043 				th = (void *)(mtod(m0, char *) + hlen);
   6044 
   6045 				ip->ip_len = 0;
   6046 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6047 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6048 			} else {
   6049 				struct ip6_hdr *ip6 =
   6050 				    (void *)(mtod(m0, char *) + offset);
   6051 				th = (void *)(mtod(m0, char *) + hlen);
   6052 
   6053 				ip6->ip6_plen = 0;
   6054 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6055 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6056 			}
   6057 			hlen += th->th_off << 2;
   6058 		}
   6059 
   6060 		if (v4) {
   6061 			WM_Q_EVCNT_INCR(txq, txtso);
   6062 			cmdlen |= WTX_TCPIP_CMD_IP;
   6063 		} else {
   6064 			WM_Q_EVCNT_INCR(txq, txtso6);
   6065 			ipcse = 0;
   6066 		}
   6067 		cmd |= WTX_TCPIP_CMD_TSE;
   6068 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6069 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6070 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6071 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6072 	}
   6073 
   6074 	/*
   6075 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6076 	 * offload feature, if we load the context descriptor, we
   6077 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6078 	 */
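        	/*
        	 * Field meanings, roughly: xxCSS is the offset at which
        	 * checksumming starts, xxCSO the offset of the checksum
        	 * field itself, and xxCSE the inclusive end offset, where
        	 * 0 means "to the end of the packet".
        	 */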
   6079 
   6080 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6081 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6082 	    WTX_TCPIP_IPCSE(ipcse);
   6083 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6084 		WM_Q_EVCNT_INCR(txq, txipsum);
   6085 		fields |= WTX_IXSM;
   6086 	}
   6087 
   6088 	offset += iphl;
   6089 
   6090 	if (m0->m_pkthdr.csum_flags &
   6091 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6092 		WM_Q_EVCNT_INCR(txq, txtusum);
   6093 		fields |= WTX_TXSM;
   6094 		tucs = WTX_TCPIP_TUCSS(offset) |
   6095 		    WTX_TCPIP_TUCSO(offset +
   6096 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6097 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6098 	} else if ((m0->m_pkthdr.csum_flags &
   6099 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6100 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6101 		fields |= WTX_TXSM;
   6102 		tucs = WTX_TCPIP_TUCSS(offset) |
   6103 		    WTX_TCPIP_TUCSO(offset +
   6104 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6105 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6106 	} else {
   6107 		/* Just initialize it to a valid TCP context. */
   6108 		tucs = WTX_TCPIP_TUCSS(offset) |
   6109 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6110 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6111 	}
   6112 
   6113 	/* Fill in the context descriptor. */
   6114 	t = (struct livengood_tcpip_ctxdesc *)
   6115 	    &txq->txq_descs[txq->txq_next];
   6116 	t->tcpip_ipcs = htole32(ipcs);
   6117 	t->tcpip_tucs = htole32(tucs);
   6118 	t->tcpip_cmdlen = htole32(cmdlen);
   6119 	t->tcpip_seg = htole32(seg);
   6120 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6121 
   6122 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6123 	txs->txs_ndesc++;
   6124 
   6125 	*cmdp = cmd;
   6126 	*fieldsp = fields;
   6127 
   6128 	return 0;
   6129 }
   6130 
   6131 /*
   6132  * wm_start:		[ifnet interface function]
   6133  *
   6134  *	Start packet transmission on the interface.
   6135  */
   6136 static void
   6137 wm_start(struct ifnet *ifp)
   6138 {
   6139 	struct wm_softc *sc = ifp->if_softc;
   6140 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6141 
   6142 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6143 
   6144 	mutex_enter(txq->txq_lock);
   6145 	if (!sc->sc_stopping)
   6146 		wm_start_locked(ifp);
   6147 	mutex_exit(txq->txq_lock);
   6148 }
   6149 
   6150 static void
   6151 wm_start_locked(struct ifnet *ifp)
   6152 {
   6153 	struct wm_softc *sc = ifp->if_softc;
   6154 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6155 	struct mbuf *m0;
   6156 	struct m_tag *mtag;
   6157 	struct wm_txsoft *txs;
   6158 	bus_dmamap_t dmamap;
   6159 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6160 	bus_addr_t curaddr;
   6161 	bus_size_t seglen, curlen;
   6162 	uint32_t cksumcmd;
   6163 	uint8_t cksumfields;
   6164 
   6165 	KASSERT(mutex_owned(txq->txq_lock));
   6166 
   6167 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6168 		return;
   6169 
   6170 	/* Remember the previous number of free descriptors. */
   6171 	ofree = txq->txq_free;
   6172 
   6173 	/*
   6174 	 * Loop through the send queue, setting up transmit descriptors
   6175 	 * until we drain the queue, or use up all available transmit
   6176 	 * descriptors.
   6177 	 */
   6178 	for (;;) {
   6179 		m0 = NULL;
   6180 
   6181 		/* Get a work queue entry. */
   6182 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6183 			wm_txeof(sc, txq);
   6184 			if (txq->txq_sfree == 0) {
   6185 				DPRINTF(WM_DEBUG_TX,
   6186 				    ("%s: TX: no free job descriptors\n",
   6187 					device_xname(sc->sc_dev)));
   6188 				WM_Q_EVCNT_INCR(txq, txsstall);
   6189 				break;
   6190 			}
   6191 		}
   6192 
   6193 		/* Grab a packet off the queue. */
   6194 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6195 		if (m0 == NULL)
   6196 			break;
   6197 
   6198 		DPRINTF(WM_DEBUG_TX,
   6199 		    ("%s: TX: have packet to transmit: %p\n",
   6200 		    device_xname(sc->sc_dev), m0));
   6201 
   6202 		txs = &txq->txq_soft[txq->txq_snext];
   6203 		dmamap = txs->txs_dmamap;
   6204 
   6205 		use_tso = (m0->m_pkthdr.csum_flags &
   6206 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6207 
   6208 		/*
   6209 		 * So says the Linux driver:
   6210 		 * The controller does a simple calculation to make sure
   6211 		 * there is enough room in the FIFO before initiating the
   6212 		 * DMA for each buffer.  The calc is:
   6213 		 *	4 = ceil(buffer len / MSS)
   6214 		 * To make sure we don't overrun the FIFO, adjust the max
   6215 		 * buffer len if the MSS drops.
   6216 		 */
   6217 		dmamap->dm_maxsegsz =
   6218 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6219 		    ? m0->m_pkthdr.segsz << 2
   6220 		    : WTX_MAX_LEN;
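        		/*
        		 * e.g. an MSS of 1460 caps each DMA segment at
        		 * 1460 << 2 = 5840 bytes, keeping ceil(len / MSS) <= 4
        		 * per buffer as required above.
        		 */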
   6221 
   6222 		/*
   6223 		 * Load the DMA map.  If this fails, the packet either
   6224 		 * didn't fit in the allotted number of segments, or we
   6225 		 * were short on resources.  For the too-many-segments
   6226 		 * case, we simply report an error and drop the packet,
   6227 		 * since we can't sanely copy a jumbo packet to a single
   6228 		 * buffer.
   6229 		 */
   6230 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6231 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6232 		if (error) {
   6233 			if (error == EFBIG) {
   6234 				WM_Q_EVCNT_INCR(txq, txdrop);
   6235 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6236 				    "DMA segments, dropping...\n",
   6237 				    device_xname(sc->sc_dev));
   6238 				wm_dump_mbuf_chain(sc, m0);
   6239 				m_freem(m0);
   6240 				continue;
   6241 			}
   6242 			/*  Short on resources, just stop for now. */
   6243 			DPRINTF(WM_DEBUG_TX,
   6244 			    ("%s: TX: dmamap load failed: %d\n",
   6245 			    device_xname(sc->sc_dev), error));
   6246 			break;
   6247 		}
   6248 
   6249 		segs_needed = dmamap->dm_nsegs;
   6250 		if (use_tso) {
   6251 			/* For sentinel descriptor; see below. */
   6252 			segs_needed++;
   6253 		}
   6254 
   6255 		/*
   6256 		 * Ensure we have enough descriptors free to describe
   6257 		 * the packet.  Note, we always reserve one descriptor
   6258 		 * at the end of the ring due to the semantics of the
   6259 		 * TDT register, plus one more in the event we need
   6260 		 * to load offload context.
   6261 		 */
   6262 		if (segs_needed > txq->txq_free - 2) {
   6263 			/*
   6264 			 * Not enough free descriptors to transmit this
   6265 			 * packet.  We haven't committed anything yet,
   6266 			 * so just unload the DMA map, put the packet
   6267 			 * back on the queue, and punt.  Notify the upper
   6268 			 * layer that there are no more slots left.
   6269 			 */
   6270 			DPRINTF(WM_DEBUG_TX,
   6271 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6272 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6273 			    segs_needed, txq->txq_free - 1));
   6274 			ifp->if_flags |= IFF_OACTIVE;
   6275 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6276 			WM_Q_EVCNT_INCR(txq, txdstall);
   6277 			break;
   6278 		}
   6279 
   6280 		/*
   6281 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6282 		 * once we know we can transmit the packet, since we
   6283 		 * do some internal FIFO space accounting here.
   6284 		 */
   6285 		if (sc->sc_type == WM_T_82547 &&
   6286 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6287 			DPRINTF(WM_DEBUG_TX,
   6288 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6289 			    device_xname(sc->sc_dev)));
   6290 			ifp->if_flags |= IFF_OACTIVE;
   6291 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6292 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6293 			break;
   6294 		}
   6295 
   6296 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6297 
   6298 		DPRINTF(WM_DEBUG_TX,
   6299 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6300 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6301 
   6302 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6303 
   6304 		/*
   6305 		 * Store a pointer to the packet so that we can free it
   6306 		 * later.
   6307 		 *
   6308 		 * Initially, we consider the number of descriptors the
   6309 		 * packet uses to be the number of DMA segments.  This may be
   6310 		 * incremented by 1 if we do checksum offload (a descriptor
   6311 		 * is used to set the checksum context).
   6312 		 */
   6313 		txs->txs_mbuf = m0;
   6314 		txs->txs_firstdesc = txq->txq_next;
   6315 		txs->txs_ndesc = segs_needed;
   6316 
   6317 		/* Set up offload parameters for this packet. */
   6318 		if (m0->m_pkthdr.csum_flags &
   6319 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6320 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6321 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6322 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6323 					  &cksumfields) != 0) {
   6324 				/* Error message already displayed. */
   6325 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6326 				continue;
   6327 			}
   6328 		} else {
   6329 			cksumcmd = 0;
   6330 			cksumfields = 0;
   6331 		}
   6332 
   6333 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6334 
   6335 		/* Sync the DMA map. */
   6336 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6337 		    BUS_DMASYNC_PREWRITE);
   6338 
   6339 		/* Initialize the transmit descriptor. */
   6340 		for (nexttx = txq->txq_next, seg = 0;
   6341 		     seg < dmamap->dm_nsegs; seg++) {
   6342 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6343 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6344 			     seglen != 0;
   6345 			     curaddr += curlen, seglen -= curlen,
   6346 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6347 				curlen = seglen;
   6348 
   6349 				/*
   6350 				 * So says the Linux driver:
   6351 				 * Work around for premature descriptor
   6352 				 * write-backs in TSO mode.  Append a
   6353 				 * 4-byte sentinel descriptor.
   6354 				 */
   6355 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6356 				    curlen > 8)
   6357 					curlen -= 4;
   6358 
   6359 				wm_set_dma_addr(
   6360 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6361 				txq->txq_descs[nexttx].wtx_cmdlen
   6362 				    = htole32(cksumcmd | curlen);
   6363 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6364 				    = 0;
   6365 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6366 				    = cksumfields;
    6367 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6368 				lasttx = nexttx;
   6369 
   6370 				DPRINTF(WM_DEBUG_TX,
   6371 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6372 				     "len %#04zx\n",
   6373 				    device_xname(sc->sc_dev), nexttx,
   6374 				    (uint64_t)curaddr, curlen));
   6375 			}
   6376 		}
   6377 
   6378 		KASSERT(lasttx != -1);
   6379 
   6380 		/*
   6381 		 * Set up the command byte on the last descriptor of
   6382 		 * the packet.  If we're in the interrupt delay window,
   6383 		 * delay the interrupt.
   6384 		 */
   6385 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6386 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6387 
   6388 		/*
   6389 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6390 		 * up the descriptor to encapsulate the packet for us.
   6391 		 *
   6392 		 * This is only valid on the last descriptor of the packet.
   6393 		 */
   6394 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6395 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6396 			    htole32(WTX_CMD_VLE);
   6397 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6398 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6399 		}
   6400 
   6401 		txs->txs_lastdesc = lasttx;
   6402 
   6403 		DPRINTF(WM_DEBUG_TX,
   6404 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6405 		    device_xname(sc->sc_dev),
   6406 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6407 
   6408 		/* Sync the descriptors we're using. */
   6409 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6410 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6411 
   6412 		/* Give the packet to the chip. */
   6413 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6414 
   6415 		DPRINTF(WM_DEBUG_TX,
   6416 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6417 
   6418 		DPRINTF(WM_DEBUG_TX,
   6419 		    ("%s: TX: finished transmitting packet, job %d\n",
   6420 		    device_xname(sc->sc_dev), txq->txq_snext));
   6421 
   6422 		/* Advance the tx pointer. */
   6423 		txq->txq_free -= txs->txs_ndesc;
   6424 		txq->txq_next = nexttx;
   6425 
   6426 		txq->txq_sfree--;
   6427 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6428 
   6429 		/* Pass the packet to any BPF listeners. */
   6430 		bpf_mtap(ifp, m0);
   6431 	}
   6432 
   6433 	if (m0 != NULL) {
   6434 		ifp->if_flags |= IFF_OACTIVE;
   6435 		WM_Q_EVCNT_INCR(txq, txdrop);
   6436 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6437 			__func__));
   6438 		m_freem(m0);
   6439 	}
   6440 
   6441 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6442 		/* No more slots; notify upper layer. */
   6443 		ifp->if_flags |= IFF_OACTIVE;
   6444 	}
   6445 
   6446 	if (txq->txq_free != ofree) {
   6447 		/* Set a watchdog timer in case the chip flakes out. */
   6448 		ifp->if_timer = 5;
   6449 	}
   6450 }
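
/*
 * Illustrative sketch (not part of the driver): the descriptor
 * accounting used in the send loop above.  Two ring slots are always
 * held back from txq_free: one due to the semantics of the TDT
 * register, and one for a possible checksum-context descriptor.  TSO
 * adds one more for the sentinel descriptor.  Names below are
 * hypothetical.
 */
#if 0
static int
tx_ring_has_room(int ring_free, int nsegs, int use_tso)
{

	if (use_tso)
		nsegs++;			/* sentinel descriptor */
	return nsegs <= ring_free - 2;		/* TDT slot + context slot */
}
#endif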
   6451 
   6452 /*
   6453  * wm_nq_tx_offload:
   6454  *
   6455  *	Set up TCP/IP checksumming parameters for the
   6456  *	specified packet, for NEWQUEUE devices
   6457  */
   6458 static int
   6459 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6460     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6461 {
   6462 	struct mbuf *m0 = txs->txs_mbuf;
   6463 	struct m_tag *mtag;
   6464 	uint32_t vl_len, mssidx, cmdc;
   6465 	struct ether_header *eh;
   6466 	int offset, iphl;
   6467 
   6468 	/*
   6469 	 * XXX It would be nice if the mbuf pkthdr had offset
   6470 	 * fields for the protocol headers.
   6471 	 */
   6472 	*cmdlenp = 0;
   6473 	*fieldsp = 0;
   6474 
   6475 	eh = mtod(m0, struct ether_header *);
   6476 	switch (htons(eh->ether_type)) {
   6477 	case ETHERTYPE_IP:
   6478 	case ETHERTYPE_IPV6:
   6479 		offset = ETHER_HDR_LEN;
   6480 		break;
   6481 
   6482 	case ETHERTYPE_VLAN:
   6483 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6484 		break;
   6485 
   6486 	default:
   6487 		/* Don't support this protocol or encapsulation. */
   6488 		*do_csum = false;
   6489 		return 0;
   6490 	}
   6491 	*do_csum = true;
   6492 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6493 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6494 
   6495 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6496 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6497 
   6498 	if ((m0->m_pkthdr.csum_flags &
   6499 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6500 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6501 	} else {
   6502 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6503 	}
   6504 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6505 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6506 
   6507 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6508 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6509 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6510 		*cmdlenp |= NQTX_CMD_VLE;
   6511 	}
   6512 
   6513 	mssidx = 0;
   6514 
   6515 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6516 		int hlen = offset + iphl;
   6517 		int tcp_hlen;
   6518 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6519 
   6520 		if (__predict_false(m0->m_len <
   6521 				    (hlen + sizeof(struct tcphdr)))) {
   6522 			/*
   6523 			 * TCP/IP headers are not in the first mbuf; we need
   6524 			 * to do this the slow and painful way.  Let's just
   6525 			 * hope this doesn't happen very often.
   6526 			 */
   6527 			struct tcphdr th;
   6528 
   6529 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6530 
   6531 			m_copydata(m0, hlen, sizeof(th), &th);
   6532 			if (v4) {
   6533 				struct ip ip;
   6534 
   6535 				m_copydata(m0, offset, sizeof(ip), &ip);
   6536 				ip.ip_len = 0;
   6537 				m_copyback(m0,
   6538 				    offset + offsetof(struct ip, ip_len),
   6539 				    sizeof(ip.ip_len), &ip.ip_len);
   6540 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6541 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6542 			} else {
   6543 				struct ip6_hdr ip6;
   6544 
   6545 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6546 				ip6.ip6_plen = 0;
   6547 				m_copyback(m0,
   6548 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6549 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6550 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6551 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6552 			}
   6553 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6554 			    sizeof(th.th_sum), &th.th_sum);
   6555 
   6556 			tcp_hlen = th.th_off << 2;
   6557 		} else {
   6558 			/*
   6559 			 * TCP/IP headers are in the first mbuf; we can do
   6560 			 * this the easy way.
   6561 			 */
   6562 			struct tcphdr *th;
   6563 
   6564 			if (v4) {
   6565 				struct ip *ip =
   6566 				    (void *)(mtod(m0, char *) + offset);
   6567 				th = (void *)(mtod(m0, char *) + hlen);
   6568 
   6569 				ip->ip_len = 0;
   6570 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6571 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6572 			} else {
   6573 				struct ip6_hdr *ip6 =
   6574 				    (void *)(mtod(m0, char *) + offset);
   6575 				th = (void *)(mtod(m0, char *) + hlen);
   6576 
   6577 				ip6->ip6_plen = 0;
   6578 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6579 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6580 			}
   6581 			tcp_hlen = th->th_off << 2;
   6582 		}
   6583 		hlen += tcp_hlen;
   6584 		*cmdlenp |= NQTX_CMD_TSE;
   6585 
   6586 		if (v4) {
   6587 			WM_Q_EVCNT_INCR(txq, txtso);
   6588 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6589 		} else {
   6590 			WM_Q_EVCNT_INCR(txq, txtso6);
   6591 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6592 		}
   6593 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6594 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6595 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6596 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6597 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6598 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6599 	} else {
   6600 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6601 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6602 	}
   6603 
   6604 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6605 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6606 		cmdc |= NQTXC_CMD_IP4;
   6607 	}
   6608 
   6609 	if (m0->m_pkthdr.csum_flags &
   6610 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6611 		WM_Q_EVCNT_INCR(txq, txtusum);
   6612 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6613 			cmdc |= NQTXC_CMD_TCP;
   6614 		} else {
   6615 			cmdc |= NQTXC_CMD_UDP;
   6616 		}
   6617 		cmdc |= NQTXC_CMD_IP4;
   6618 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6619 	}
   6620 	if (m0->m_pkthdr.csum_flags &
   6621 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6622 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6623 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6624 			cmdc |= NQTXC_CMD_TCP;
   6625 		} else {
   6626 			cmdc |= NQTXC_CMD_UDP;
   6627 		}
   6628 		cmdc |= NQTXC_CMD_IP6;
   6629 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6630 	}
   6631 
   6632 	/* Fill in the context descriptor. */
   6633 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6634 	    htole32(vl_len);
   6635 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6636 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6637 	    htole32(cmdc);
   6638 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6639 	    htole32(mssidx);
   6640 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6641 	DPRINTF(WM_DEBUG_TX,
   6642 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6643 	    txq->txq_next, 0, vl_len));
   6644 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6645 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6646 	txs->txs_ndesc++;
   6647 	return 0;
   6648 }
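
/*
 * Illustrative sketch (not part of the driver): the pseudo-header
 * checksum that wm_nq_tx_offload() seeds into th_sum for TSO.  It is
 * a folded 16-bit one's-complement sum over source address, dest
 * address and protocol, with the length deliberately zeroed so the
 * hardware can add the per-segment length itself.  This approximates
 * what in_cksum_phdr() computes; the name below is hypothetical and
 * the arguments are in network byte order.
 */
#if 0
static uint16_t
pseudo_hdr_sum(uint32_t src, uint32_t dst, uint32_t lenproto)
{
	uint32_t sum;

	sum = (src & 0xffff) + (src >> 16)
	    + (dst & 0xffff) + (dst >> 16)
	    + (lenproto & 0xffff) + (lenproto >> 16);
	while (sum > 0xffff)			/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}
#endif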
   6649 
   6650 /*
   6651  * wm_nq_start:		[ifnet interface function]
   6652  *
   6653  *	Start packet transmission on the interface for NEWQUEUE devices
   6654  */
   6655 static void
   6656 wm_nq_start(struct ifnet *ifp)
   6657 {
   6658 	struct wm_softc *sc = ifp->if_softc;
   6659 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6660 
   6661 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6662 
   6663 	mutex_enter(txq->txq_lock);
   6664 	if (!sc->sc_stopping)
   6665 		wm_nq_start_locked(ifp);
   6666 	mutex_exit(txq->txq_lock);
   6667 }
   6668 
   6669 static void
   6670 wm_nq_start_locked(struct ifnet *ifp)
   6671 {
   6672 	struct wm_softc *sc = ifp->if_softc;
   6673 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6674 
   6675 	wm_nq_send_common_locked(ifp, txq, false);
   6676 }
   6677 
   6678 static inline int
   6679 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6680 {
   6681 	struct wm_softc *sc = ifp->if_softc;
   6682 	u_int cpuid = cpu_index(curcpu());
   6683 
   6684 	/*
    6685 	 * Currently, a simple distribution strategy.
    6686 	 * TODO: distribute by flowid (RSS hash value); see the
    6687 	 * sketch after this function.
   6688 	 */
   6689 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6690 }
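
/*
 * Illustrative sketch (not part of the driver): the flowid-based
 * distribution mentioned in the TODO above.  Keying the queue choice
 * off an RSS-style flow hash keeps one flow on one queue, avoiding
 * packet reordering within a flow.  Names are hypothetical.
 */
#if 0
static int
select_txqueue_by_flowid(uint32_t flowid, u_int nqueues)
{

	return flowid % nqueues;	/* same flow -> same queue */
}
#endif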
   6691 
   6692 static int
   6693 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6694 {
   6695 	int qid;
   6696 	struct wm_softc *sc = ifp->if_softc;
   6697 	struct wm_txqueue *txq;
   6698 
   6699 	qid = wm_nq_select_txqueue(ifp, m);
   6700 	txq = &sc->sc_queue[qid].wmq_txq;
   6701 
   6702 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6703 		m_freem(m);
   6704 		WM_Q_EVCNT_INCR(txq, txdrop);
   6705 		return ENOBUFS;
   6706 	}
   6707 
   6708 	if (mutex_tryenter(txq->txq_lock)) {
   6709 		/* XXXX should be per TX queue */
   6710 		ifp->if_obytes += m->m_pkthdr.len;
   6711 		if (m->m_flags & M_MCAST)
   6712 			ifp->if_omcasts++;
   6713 
   6714 		if (!sc->sc_stopping)
   6715 			wm_nq_transmit_locked(ifp, txq);
   6716 		mutex_exit(txq->txq_lock);
   6717 	}
   6718 
   6719 	return 0;
   6720 }
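
/*
 * Illustrative sketch (not part of the driver): the enqueue pattern
 * used by wm_nq_transmit() above.  The packet always goes onto the
 * lock-free pcq first; the send loop is entered only if the queue
 * lock is uncontended.  If the trylock fails, the current lock holder
 * is already inside the send loop and will drain our packet, so
 * nothing is stranded.  Names are hypothetical; pthread stands in for
 * the kernel mutex.
 */
#if 0
#include <pthread.h>

struct queue;					/* hypothetical */
void lockfree_put(struct queue *, void *);	/* hypothetical */
void drain(struct queue *);			/* hypothetical */
extern pthread_mutex_t queue_lock;

static void
enqueue_and_maybe_drain(struct queue *q, void *pkt)
{

	lockfree_put(q, pkt);			/* always enqueue first */
	if (pthread_mutex_trylock(&queue_lock) == 0) {
		drain(q);			/* we won the lock */
		pthread_mutex_unlock(&queue_lock);
	}
	/* else: the holder's drain loop will see our packet */
}
#endif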
   6721 
   6722 static void
   6723 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6724 {
   6725 
   6726 	wm_nq_send_common_locked(ifp, txq, true);
   6727 }
   6728 
   6729 static void
   6730 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6731     bool is_transmit)
   6732 {
   6733 	struct wm_softc *sc = ifp->if_softc;
   6734 	struct mbuf *m0;
   6735 	struct m_tag *mtag;
   6736 	struct wm_txsoft *txs;
   6737 	bus_dmamap_t dmamap;
   6738 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6739 	bool do_csum, sent;
   6740 
   6741 	KASSERT(mutex_owned(txq->txq_lock));
   6742 
   6743 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6744 		return;
   6745 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6746 		return;
   6747 
   6748 	sent = false;
   6749 
   6750 	/*
   6751 	 * Loop through the send queue, setting up transmit descriptors
   6752 	 * until we drain the queue, or use up all available transmit
   6753 	 * descriptors.
   6754 	 */
   6755 	for (;;) {
   6756 		m0 = NULL;
   6757 
   6758 		/* Get a work queue entry. */
   6759 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6760 			wm_txeof(sc, txq);
   6761 			if (txq->txq_sfree == 0) {
   6762 				DPRINTF(WM_DEBUG_TX,
   6763 				    ("%s: TX: no free job descriptors\n",
   6764 					device_xname(sc->sc_dev)));
   6765 				WM_Q_EVCNT_INCR(txq, txsstall);
   6766 				break;
   6767 			}
   6768 		}
   6769 
   6770 		/* Grab a packet off the queue. */
   6771 		if (is_transmit)
   6772 			m0 = pcq_get(txq->txq_interq);
   6773 		else
   6774 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6775 		if (m0 == NULL)
   6776 			break;
   6777 
   6778 		DPRINTF(WM_DEBUG_TX,
   6779 		    ("%s: TX: have packet to transmit: %p\n",
   6780 		    device_xname(sc->sc_dev), m0));
   6781 
   6782 		txs = &txq->txq_soft[txq->txq_snext];
   6783 		dmamap = txs->txs_dmamap;
   6784 
   6785 		/*
   6786 		 * Load the DMA map.  If this fails, the packet either
   6787 		 * didn't fit in the allotted number of segments, or we
   6788 		 * were short on resources.  For the too-many-segments
   6789 		 * case, we simply report an error and drop the packet,
   6790 		 * since we can't sanely copy a jumbo packet to a single
   6791 		 * buffer.
   6792 		 */
   6793 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6794 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6795 		if (error) {
   6796 			if (error == EFBIG) {
   6797 				WM_Q_EVCNT_INCR(txq, txdrop);
   6798 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6799 				    "DMA segments, dropping...\n",
   6800 				    device_xname(sc->sc_dev));
   6801 				wm_dump_mbuf_chain(sc, m0);
   6802 				m_freem(m0);
   6803 				continue;
   6804 			}
   6805 			/* Short on resources, just stop for now. */
   6806 			DPRINTF(WM_DEBUG_TX,
   6807 			    ("%s: TX: dmamap load failed: %d\n",
   6808 			    device_xname(sc->sc_dev), error));
   6809 			break;
   6810 		}
   6811 
   6812 		segs_needed = dmamap->dm_nsegs;
   6813 
   6814 		/*
   6815 		 * Ensure we have enough descriptors free to describe
   6816 		 * the packet.  Note, we always reserve one descriptor
   6817 		 * at the end of the ring due to the semantics of the
   6818 		 * TDT register, plus one more in the event we need
   6819 		 * to load offload context.
   6820 		 */
   6821 		if (segs_needed > txq->txq_free - 2) {
   6822 			/*
   6823 			 * Not enough free descriptors to transmit this
   6824 			 * packet.  We haven't committed anything yet,
   6825 			 * so just unload the DMA map, put the packet
    6826 			 * so just unload the DMA map, put the packet
    6827 			 * back on the queue, and punt.  Notify the upper
   6828 			 */
   6829 			DPRINTF(WM_DEBUG_TX,
   6830 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6831 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6832 			    segs_needed, txq->txq_free - 1));
   6833 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6834 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6835 			WM_Q_EVCNT_INCR(txq, txdstall);
   6836 			break;
   6837 		}
   6838 
   6839 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6840 
   6841 		DPRINTF(WM_DEBUG_TX,
   6842 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6843 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6844 
   6845 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6846 
   6847 		/*
   6848 		 * Store a pointer to the packet so that we can free it
   6849 		 * later.
   6850 		 *
   6851 		 * Initially, we consider the number of descriptors the
    6852 		 * packet uses to be the number of DMA segments.  This may be
   6853 		 * incremented by 1 if we do checksum offload (a descriptor
   6854 		 * is used to set the checksum context).
   6855 		 */
   6856 		txs->txs_mbuf = m0;
   6857 		txs->txs_firstdesc = txq->txq_next;
   6858 		txs->txs_ndesc = segs_needed;
   6859 
   6860 		/* Set up offload parameters for this packet. */
   6861 		uint32_t cmdlen, fields, dcmdlen;
   6862 		if (m0->m_pkthdr.csum_flags &
   6863 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6864 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6865 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6866 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   6867 			    &do_csum) != 0) {
   6868 				/* Error message already displayed. */
   6869 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6870 				continue;
   6871 			}
   6872 		} else {
   6873 			do_csum = false;
   6874 			cmdlen = 0;
   6875 			fields = 0;
   6876 		}
   6877 
   6878 		/* Sync the DMA map. */
   6879 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6880 		    BUS_DMASYNC_PREWRITE);
   6881 
   6882 		/* Initialize the first transmit descriptor. */
   6883 		nexttx = txq->txq_next;
   6884 		if (!do_csum) {
   6885 			/* setup a legacy descriptor */
   6886 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   6887 			    dmamap->dm_segs[0].ds_addr);
   6888 			txq->txq_descs[nexttx].wtx_cmdlen =
   6889 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6890 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6891 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6892 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6893 			    NULL) {
   6894 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6895 				    htole32(WTX_CMD_VLE);
   6896 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6897 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6898 			} else {
    6899 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6900 			}
   6901 			dcmdlen = 0;
   6902 		} else {
   6903 			/* setup an advanced data descriptor */
   6904 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6905 			    htole64(dmamap->dm_segs[0].ds_addr);
   6906 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6907 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6908 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6909 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6910 			    htole32(fields);
   6911 			DPRINTF(WM_DEBUG_TX,
   6912 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6913 			    device_xname(sc->sc_dev), nexttx,
   6914 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6915 			DPRINTF(WM_DEBUG_TX,
   6916 			    ("\t 0x%08x%08x\n", fields,
   6917 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6918 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6919 		}
   6920 
   6921 		lasttx = nexttx;
   6922 		nexttx = WM_NEXTTX(txq, nexttx);
   6923 		/*
    6924 		 * Fill in the next descriptors.  The legacy and advanced
    6925 		 * formats are the same here.
   6926 		 */
   6927 		for (seg = 1; seg < dmamap->dm_nsegs;
   6928 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6929 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6930 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6931 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6932 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6933 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6934 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6935 			lasttx = nexttx;
   6936 
   6937 			DPRINTF(WM_DEBUG_TX,
   6938 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6939 			     "len %#04zx\n",
   6940 			    device_xname(sc->sc_dev), nexttx,
   6941 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6942 			    dmamap->dm_segs[seg].ds_len));
   6943 		}
   6944 
   6945 		KASSERT(lasttx != -1);
   6946 
   6947 		/*
   6948 		 * Set up the command byte on the last descriptor of
   6949 		 * the packet.  If we're in the interrupt delay window,
   6950 		 * delay the interrupt.
   6951 		 */
   6952 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6953 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6954 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6955 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6956 
   6957 		txs->txs_lastdesc = lasttx;
   6958 
   6959 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6960 		    device_xname(sc->sc_dev),
   6961 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6962 
   6963 		/* Sync the descriptors we're using. */
   6964 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6965 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6966 
   6967 		/* Give the packet to the chip. */
   6968 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6969 		sent = true;
   6970 
   6971 		DPRINTF(WM_DEBUG_TX,
   6972 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6973 
   6974 		DPRINTF(WM_DEBUG_TX,
   6975 		    ("%s: TX: finished transmitting packet, job %d\n",
   6976 		    device_xname(sc->sc_dev), txq->txq_snext));
   6977 
   6978 		/* Advance the tx pointer. */
   6979 		txq->txq_free -= txs->txs_ndesc;
   6980 		txq->txq_next = nexttx;
   6981 
   6982 		txq->txq_sfree--;
   6983 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6984 
   6985 		/* Pass the packet to any BPF listeners. */
   6986 		bpf_mtap(ifp, m0);
   6987 	}
   6988 
   6989 	if (m0 != NULL) {
   6990 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6991 		WM_Q_EVCNT_INCR(txq, txdrop);
   6992 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6993 			__func__));
   6994 		m_freem(m0);
   6995 	}
   6996 
   6997 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6998 		/* No more slots; notify upper layer. */
   6999 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7000 	}
   7001 
   7002 	if (sent) {
   7003 		/* Set a watchdog timer in case the chip flakes out. */
   7004 		ifp->if_timer = 5;
   7005 	}
   7006 }
   7007 
   7008 /* Interrupt */
   7009 
   7010 /*
   7011  * wm_txeof:
   7012  *
   7013  *	Helper; handle transmit interrupts.
   7014  */
   7015 static int
   7016 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7017 {
   7018 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7019 	struct wm_txsoft *txs;
   7020 	bool processed = false;
   7021 	int count = 0;
   7022 	int i;
   7023 	uint8_t status;
   7024 
   7025 	KASSERT(mutex_owned(txq->txq_lock));
   7026 
   7027 	if (sc->sc_stopping)
   7028 		return 0;
   7029 
   7030 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7031 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7032 	else
   7033 		ifp->if_flags &= ~IFF_OACTIVE;
   7034 
   7035 	/*
   7036 	 * Go through the Tx list and free mbufs for those
   7037 	 * frames which have been transmitted.
   7038 	 */
   7039 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7040 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7041 		txs = &txq->txq_soft[i];
   7042 
   7043 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7044 			device_xname(sc->sc_dev), i));
   7045 
   7046 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7047 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7048 
   7049 		status =
   7050 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7051 		if ((status & WTX_ST_DD) == 0) {
   7052 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7053 			    BUS_DMASYNC_PREREAD);
   7054 			break;
   7055 		}
   7056 
   7057 		processed = true;
   7058 		count++;
   7059 		DPRINTF(WM_DEBUG_TX,
   7060 		    ("%s: TX: job %d done: descs %d..%d\n",
   7061 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7062 		    txs->txs_lastdesc));
   7063 
   7064 		/*
   7065 		 * XXX We should probably be using the statistics
   7066 		 * XXX registers, but I don't know if they exist
   7067 		 * XXX on chips before the i82544.
   7068 		 */
   7069 
   7070 #ifdef WM_EVENT_COUNTERS
   7071 		if (status & WTX_ST_TU)
   7072 			WM_Q_EVCNT_INCR(txq, tu);
   7073 #endif /* WM_EVENT_COUNTERS */
   7074 
   7075 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7076 			ifp->if_oerrors++;
   7077 			if (status & WTX_ST_LC)
   7078 				log(LOG_WARNING, "%s: late collision\n",
   7079 				    device_xname(sc->sc_dev));
   7080 			else if (status & WTX_ST_EC) {
   7081 				ifp->if_collisions += 16;
   7082 				log(LOG_WARNING, "%s: excessive collisions\n",
   7083 				    device_xname(sc->sc_dev));
   7084 			}
   7085 		} else
   7086 			ifp->if_opackets++;
   7087 
   7088 		txq->txq_free += txs->txs_ndesc;
   7089 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7090 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7091 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7092 		m_freem(txs->txs_mbuf);
   7093 		txs->txs_mbuf = NULL;
   7094 	}
   7095 
   7096 	/* Update the dirty transmit buffer pointer. */
   7097 	txq->txq_sdirty = i;
   7098 	DPRINTF(WM_DEBUG_TX,
   7099 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7100 
   7101 	if (count != 0)
   7102 		rnd_add_uint32(&sc->rnd_source, count);
   7103 
   7104 	/*
   7105 	 * If there are no more pending transmissions, cancel the watchdog
   7106 	 * timer.
   7107 	 */
   7108 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7109 		ifp->if_timer = 0;
   7110 
   7111 	return processed;
   7112 }
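
/*
 * Illustrative sketch (not part of the driver): the reclaim scan in
 * wm_txeof() above.  The chip sets the DD (descriptor done) bit in
 * the status byte of each job's last descriptor when it finishes; the
 * scan walks the dirty jobs in order and stops at the first one whose
 * DD bit is still clear.  Ring wrap-around is omitted; names are
 * hypothetical.
 */
#if 0
#define DD_BIT	0x01	/* "descriptor done", bit 0 of the status byte */

static int
reclaim_jobs(const uint8_t *last_status, int first, int ndirty)
{
	int i, reclaimed = 0;

	for (i = first; reclaimed < ndirty; i++, reclaimed++) {
		if ((last_status[i] & DD_BIT) == 0)
			break;	/* the chip hasn't finished this job */
		/* ... unload DMA map, free mbuf, return descriptors ... */
	}
	return reclaimed;
}
#endif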
   7113 
   7114 /*
   7115  * wm_rxeof:
   7116  *
   7117  *	Helper; handle receive interrupts.
   7118  */
   7119 static void
   7120 wm_rxeof(struct wm_rxqueue *rxq)
   7121 {
   7122 	struct wm_softc *sc = rxq->rxq_sc;
   7123 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7124 	struct wm_rxsoft *rxs;
   7125 	struct mbuf *m;
   7126 	int i, len;
   7127 	int count = 0;
   7128 	uint8_t status, errors;
   7129 	uint16_t vlantag;
   7130 
   7131 	KASSERT(mutex_owned(rxq->rxq_lock));
   7132 
   7133 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7134 		rxs = &rxq->rxq_soft[i];
   7135 
   7136 		DPRINTF(WM_DEBUG_RX,
   7137 		    ("%s: RX: checking descriptor %d\n",
   7138 		    device_xname(sc->sc_dev), i));
   7139 
   7140 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7141 
   7142 		status = rxq->rxq_descs[i].wrx_status;
   7143 		errors = rxq->rxq_descs[i].wrx_errors;
   7144 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7145 		vlantag = rxq->rxq_descs[i].wrx_special;
   7146 
   7147 		if ((status & WRX_ST_DD) == 0) {
   7148 			/* We have processed all of the receive descriptors. */
   7149 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7150 			break;
   7151 		}
   7152 
   7153 		count++;
   7154 		if (__predict_false(rxq->rxq_discard)) {
   7155 			DPRINTF(WM_DEBUG_RX,
   7156 			    ("%s: RX: discarding contents of descriptor %d\n",
   7157 			    device_xname(sc->sc_dev), i));
   7158 			wm_init_rxdesc(rxq, i);
   7159 			if (status & WRX_ST_EOP) {
   7160 				/* Reset our state. */
   7161 				DPRINTF(WM_DEBUG_RX,
   7162 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7163 				    device_xname(sc->sc_dev)));
   7164 				rxq->rxq_discard = 0;
   7165 			}
   7166 			continue;
   7167 		}
   7168 
   7169 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7170 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7171 
   7172 		m = rxs->rxs_mbuf;
   7173 
   7174 		/*
   7175 		 * Add a new receive buffer to the ring, unless of
   7176 		 * course the length is zero. Treat the latter as a
   7177 		 * failed mapping.
   7178 		 */
   7179 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7180 			/*
   7181 			 * Failed, throw away what we've done so
   7182 			 * far, and discard the rest of the packet.
   7183 			 */
   7184 			ifp->if_ierrors++;
   7185 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7186 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7187 			wm_init_rxdesc(rxq, i);
   7188 			if ((status & WRX_ST_EOP) == 0)
   7189 				rxq->rxq_discard = 1;
   7190 			if (rxq->rxq_head != NULL)
   7191 				m_freem(rxq->rxq_head);
   7192 			WM_RXCHAIN_RESET(rxq);
   7193 			DPRINTF(WM_DEBUG_RX,
   7194 			    ("%s: RX: Rx buffer allocation failed, "
   7195 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7196 			    rxq->rxq_discard ? " (discard)" : ""));
   7197 			continue;
   7198 		}
   7199 
   7200 		m->m_len = len;
   7201 		rxq->rxq_len += len;
   7202 		DPRINTF(WM_DEBUG_RX,
   7203 		    ("%s: RX: buffer at %p len %d\n",
   7204 		    device_xname(sc->sc_dev), m->m_data, len));
   7205 
   7206 		/* If this is not the end of the packet, keep looking. */
   7207 		if ((status & WRX_ST_EOP) == 0) {
   7208 			WM_RXCHAIN_LINK(rxq, m);
   7209 			DPRINTF(WM_DEBUG_RX,
   7210 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7211 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7212 			continue;
   7213 		}
   7214 
   7215 		/*
   7216 		 * Okay, we have the entire packet now.  The chip is
    7217 		 * configured to include the FCS except on the I350, I354
    7218 		 * and I21[01] (not all chips can be configured to strip
    7219 		 * it), so we need to trim it.
    7220 		 * We may need to adjust the length of the previous mbuf
    7221 		 * in the chain if the current mbuf is too short.
    7222 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7223 		 * register is always set on the I350, so we don't trim it.
   7224 		 */
   7225 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7226 		    && (sc->sc_type != WM_T_I210)
   7227 		    && (sc->sc_type != WM_T_I211)) {
   7228 			if (m->m_len < ETHER_CRC_LEN) {
   7229 				rxq->rxq_tail->m_len
   7230 				    -= (ETHER_CRC_LEN - m->m_len);
   7231 				m->m_len = 0;
   7232 			} else
   7233 				m->m_len -= ETHER_CRC_LEN;
   7234 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7235 		} else
   7236 			len = rxq->rxq_len;
   7237 
   7238 		WM_RXCHAIN_LINK(rxq, m);
   7239 
   7240 		*rxq->rxq_tailp = NULL;
   7241 		m = rxq->rxq_head;
   7242 
   7243 		WM_RXCHAIN_RESET(rxq);
   7244 
   7245 		DPRINTF(WM_DEBUG_RX,
   7246 		    ("%s: RX: have entire packet, len -> %d\n",
   7247 		    device_xname(sc->sc_dev), len));
   7248 
   7249 		/* If an error occurred, update stats and drop the packet. */
   7250 		if (errors &
   7251 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7252 			if (errors & WRX_ER_SE)
   7253 				log(LOG_WARNING, "%s: symbol error\n",
   7254 				    device_xname(sc->sc_dev));
   7255 			else if (errors & WRX_ER_SEQ)
   7256 				log(LOG_WARNING, "%s: receive sequence error\n",
   7257 				    device_xname(sc->sc_dev));
   7258 			else if (errors & WRX_ER_CE)
   7259 				log(LOG_WARNING, "%s: CRC error\n",
   7260 				    device_xname(sc->sc_dev));
   7261 			m_freem(m);
   7262 			continue;
   7263 		}
   7264 
   7265 		/* No errors.  Receive the packet. */
   7266 		m_set_rcvif(m, ifp);
   7267 		m->m_pkthdr.len = len;
   7268 
   7269 		/*
   7270 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7271 		 * for us.  Associate the tag with the packet.
   7272 		 */
   7273 		/* XXXX should check for i350 and i354 */
   7274 		if ((status & WRX_ST_VP) != 0) {
   7275 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7276 		}
   7277 
   7278 		/* Set up checksum info for this packet. */
   7279 		if ((status & WRX_ST_IXSM) == 0) {
   7280 			if (status & WRX_ST_IPCS) {
   7281 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7282 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7283 				if (errors & WRX_ER_IPE)
   7284 					m->m_pkthdr.csum_flags |=
   7285 					    M_CSUM_IPv4_BAD;
   7286 			}
   7287 			if (status & WRX_ST_TCPCS) {
   7288 				/*
   7289 				 * Note: we don't know if this was TCP or UDP,
   7290 				 * so we just set both bits, and expect the
   7291 				 * upper layers to deal.
   7292 				 */
   7293 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7294 				m->m_pkthdr.csum_flags |=
   7295 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7296 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7297 				if (errors & WRX_ER_TCPE)
   7298 					m->m_pkthdr.csum_flags |=
   7299 					    M_CSUM_TCP_UDP_BAD;
   7300 			}
   7301 		}
   7302 
   7303 		ifp->if_ipackets++;
   7304 
   7305 		mutex_exit(rxq->rxq_lock);
   7306 
   7307 		/* Pass this up to any BPF listeners. */
   7308 		bpf_mtap(ifp, m);
   7309 
   7310 		/* Pass it on. */
   7311 		if_percpuq_enqueue(sc->sc_ipq, m);
   7312 
   7313 		mutex_enter(rxq->rxq_lock);
   7314 
   7315 		if (sc->sc_stopping)
   7316 			break;
   7317 	}
   7318 
   7319 	/* Update the receive pointer. */
   7320 	rxq->rxq_ptr = i;
   7321 	if (count != 0)
   7322 		rnd_add_uint32(&sc->rnd_source, count);
   7323 
   7324 	DPRINTF(WM_DEBUG_RX,
   7325 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7326 }
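
/*
 * Illustrative sketch (not part of the driver): the FCS trim done in
 * wm_rxeof() above.  The 4-byte CRC sits at the very end of the
 * packet; if the final mbuf is shorter than the CRC, the remainder
 * has to come off the previous mbuf in the chain.  Names are
 * hypothetical; ETHER_CRC_LEN is the real macro (4).
 */
#if 0
static void
trim_fcs(int *prev_len, int *last_len)
{

	if (*last_len < ETHER_CRC_LEN) {
		/* The CRC straddles the last two buffers. */
		*prev_len -= ETHER_CRC_LEN - *last_len;
		*last_len = 0;
	} else
		*last_len -= ETHER_CRC_LEN;
}
#endif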
   7327 
   7328 /*
   7329  * wm_linkintr_gmii:
   7330  *
   7331  *	Helper; handle link interrupts for GMII.
   7332  */
   7333 static void
   7334 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7335 {
   7336 
   7337 	KASSERT(WM_CORE_LOCKED(sc));
   7338 
   7339 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7340 		__func__));
   7341 
   7342 	if (icr & ICR_LSC) {
   7343 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7344 
   7345 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7346 			wm_gig_downshift_workaround_ich8lan(sc);
   7347 
   7348 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7349 			device_xname(sc->sc_dev)));
   7350 		mii_pollstat(&sc->sc_mii);
   7351 		if (sc->sc_type == WM_T_82543) {
   7352 			int miistatus, active;
   7353 
   7354 			/*
    7355 			 * With the 82543, we need to force the MAC's
    7356 			 * speed and duplex to match the PHY's
    7357 			 * configuration.
   7358 			 */
   7359 			miistatus = sc->sc_mii.mii_media_status;
   7360 
   7361 			if (miistatus & IFM_ACTIVE) {
   7362 				active = sc->sc_mii.mii_media_active;
   7363 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7364 				switch (IFM_SUBTYPE(active)) {
   7365 				case IFM_10_T:
   7366 					sc->sc_ctrl |= CTRL_SPEED_10;
   7367 					break;
   7368 				case IFM_100_TX:
   7369 					sc->sc_ctrl |= CTRL_SPEED_100;
   7370 					break;
   7371 				case IFM_1000_T:
   7372 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7373 					break;
   7374 				default:
   7375 					/*
   7376 					 * fiber?
    7377 					 * Fiber?
    7378 					 * Should not get here.
   7379 					printf("unknown media (%x)\n", active);
   7380 					break;
   7381 				}
   7382 				if (active & IFM_FDX)
   7383 					sc->sc_ctrl |= CTRL_FD;
   7384 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7385 			}
   7386 		} else if ((sc->sc_type == WM_T_ICH8)
   7387 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7388 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7389 		} else if (sc->sc_type == WM_T_PCH) {
   7390 			wm_k1_gig_workaround_hv(sc,
   7391 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7392 		}
   7393 
   7394 		if ((sc->sc_phytype == WMPHY_82578)
   7395 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7396 			== IFM_1000_T)) {
   7397 
   7398 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7399 				delay(200*1000); /* XXX too big */
   7400 
   7401 				/* Link stall fix for link up */
   7402 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7403 				    HV_MUX_DATA_CTRL,
   7404 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7405 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7406 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7407 				    HV_MUX_DATA_CTRL,
   7408 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7409 			}
   7410 		}
   7411 	} else if (icr & ICR_RXSEQ) {
   7412 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7413 			device_xname(sc->sc_dev)));
   7414 	}
   7415 }
   7416 
   7417 /*
   7418  * wm_linkintr_tbi:
   7419  *
   7420  *	Helper; handle link interrupts for TBI mode.
   7421  */
   7422 static void
   7423 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7424 {
   7425 	uint32_t status;
   7426 
   7427 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7428 		__func__));
   7429 
   7430 	status = CSR_READ(sc, WMREG_STATUS);
   7431 	if (icr & ICR_LSC) {
   7432 		if (status & STATUS_LU) {
   7433 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7434 			    device_xname(sc->sc_dev),
   7435 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7436 			/*
    7437 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    7438 			 * automatically, so we should refresh sc->sc_ctrl
   7439 			 */
   7440 
   7441 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7442 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7443 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7444 			if (status & STATUS_FD)
   7445 				sc->sc_tctl |=
   7446 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7447 			else
   7448 				sc->sc_tctl |=
   7449 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7450 			if (sc->sc_ctrl & CTRL_TFCE)
   7451 				sc->sc_fcrtl |= FCRTL_XONE;
   7452 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7453 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7454 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7455 				      sc->sc_fcrtl);
   7456 			sc->sc_tbi_linkup = 1;
   7457 		} else {
   7458 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7459 			    device_xname(sc->sc_dev)));
   7460 			sc->sc_tbi_linkup = 0;
   7461 		}
   7462 		/* Update LED */
   7463 		wm_tbi_serdes_set_linkled(sc);
   7464 	} else if (icr & ICR_RXSEQ) {
   7465 		DPRINTF(WM_DEBUG_LINK,
   7466 		    ("%s: LINK: Receive sequence error\n",
   7467 		    device_xname(sc->sc_dev)));
   7468 	}
   7469 }
   7470 
   7471 /*
   7472  * wm_linkintr_serdes:
   7473  *
    7474  *	Helper; handle link interrupts for SERDES mode.
   7475  */
   7476 static void
   7477 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7478 {
   7479 	struct mii_data *mii = &sc->sc_mii;
   7480 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7481 	uint32_t pcs_adv, pcs_lpab, reg;
   7482 
   7483 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7484 		__func__));
   7485 
   7486 	if (icr & ICR_LSC) {
   7487 		/* Check PCS */
   7488 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7489 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7490 			mii->mii_media_status |= IFM_ACTIVE;
   7491 			sc->sc_tbi_linkup = 1;
   7492 		} else {
   7493 			mii->mii_media_status |= IFM_NONE;
   7494 			sc->sc_tbi_linkup = 0;
   7495 			wm_tbi_serdes_set_linkled(sc);
   7496 			return;
   7497 		}
   7498 		mii->mii_media_active |= IFM_1000_SX;
   7499 		if ((reg & PCS_LSTS_FDX) != 0)
   7500 			mii->mii_media_active |= IFM_FDX;
   7501 		else
   7502 			mii->mii_media_active |= IFM_HDX;
   7503 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7504 			/* Check flow */
   7505 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7506 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7507 				DPRINTF(WM_DEBUG_LINK,
   7508 				    ("XXX LINKOK but not ACOMP\n"));
   7509 				return;
   7510 			}
   7511 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7512 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7513 			DPRINTF(WM_DEBUG_LINK,
   7514 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7515 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7516 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7517 				mii->mii_media_active |= IFM_FLOW
   7518 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7519 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7520 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7521 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7522 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7523 				mii->mii_media_active |= IFM_FLOW
   7524 				    | IFM_ETH_TXPAUSE;
   7525 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7526 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7527 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7528 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7529 				mii->mii_media_active |= IFM_FLOW
   7530 				    | IFM_ETH_RXPAUSE;
   7531 		}
   7532 		/* Update LED */
   7533 		wm_tbi_serdes_set_linkled(sc);
   7534 	} else {
   7535 		DPRINTF(WM_DEBUG_LINK,
   7536 		    ("%s: LINK: Receive sequence error\n",
   7537 		    device_xname(sc->sc_dev)));
   7538 	}
   7539 }
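
/*
 * Illustrative sketch (not part of the driver): the PAUSE resolution
 * implemented in wm_linkintr_serdes() above, in the style of IEEE
 * 802.3 symmetric/asymmetric flow control negotiation.  "adv" is our
 * advertisement, "lpa" the link partner's.  The names and bit layout
 * are hypothetical.
 */
#if 0
#define PAUSE_SYM	0x1
#define PAUSE_ASYM	0x2

static void
resolve_pause(int adv, int lpa, int *tx_pause, int *rx_pause)
{

	*tx_pause = *rx_pause = 0;
	if ((adv & PAUSE_SYM) && (lpa & PAUSE_SYM))
		*tx_pause = *rx_pause = 1;	/* symmetric */
	else if (!(adv & PAUSE_SYM) && (adv & PAUSE_ASYM)
	    && (lpa & PAUSE_SYM) && (lpa & PAUSE_ASYM))
		*tx_pause = 1;		/* we may send PAUSE frames */
	else if ((adv & PAUSE_SYM) && (adv & PAUSE_ASYM)
	    && !(lpa & PAUSE_SYM) && (lpa & PAUSE_ASYM))
		*rx_pause = 1;		/* we honor received PAUSE */
}
#endif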
   7540 
   7541 /*
   7542  * wm_linkintr:
   7543  *
   7544  *	Helper; handle link interrupts.
   7545  */
   7546 static void
   7547 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7548 {
   7549 
   7550 	KASSERT(WM_CORE_LOCKED(sc));
   7551 
   7552 	if (sc->sc_flags & WM_F_HAS_MII)
   7553 		wm_linkintr_gmii(sc, icr);
   7554 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7555 	    && (sc->sc_type >= WM_T_82575))
   7556 		wm_linkintr_serdes(sc, icr);
   7557 	else
   7558 		wm_linkintr_tbi(sc, icr);
   7559 }
   7560 
   7561 /*
   7562  * wm_intr_legacy:
   7563  *
   7564  *	Interrupt service routine for INTx and MSI.
   7565  */
   7566 static int
   7567 wm_intr_legacy(void *arg)
   7568 {
   7569 	struct wm_softc *sc = arg;
   7570 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7571 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7572 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7573 	uint32_t icr, rndval = 0;
   7574 	int handled = 0;
   7575 
   7576 	DPRINTF(WM_DEBUG_TX,
   7577 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7578 	while (1 /* CONSTCOND */) {
   7579 		icr = CSR_READ(sc, WMREG_ICR);
   7580 		if ((icr & sc->sc_icr) == 0)
   7581 			break;
   7582 		if (rndval == 0)
   7583 			rndval = icr;
   7584 
   7585 		mutex_enter(rxq->rxq_lock);
   7586 
   7587 		if (sc->sc_stopping) {
   7588 			mutex_exit(rxq->rxq_lock);
   7589 			break;
   7590 		}
   7591 
   7592 		handled = 1;
   7593 
   7594 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7595 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7596 			DPRINTF(WM_DEBUG_RX,
   7597 			    ("%s: RX: got Rx intr 0x%08x\n",
   7598 			    device_xname(sc->sc_dev),
   7599 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7600 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7601 		}
   7602 #endif
   7603 		wm_rxeof(rxq);
   7604 
   7605 		mutex_exit(rxq->rxq_lock);
   7606 		mutex_enter(txq->txq_lock);
   7607 
   7608 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7609 		if (icr & ICR_TXDW) {
   7610 			DPRINTF(WM_DEBUG_TX,
   7611 			    ("%s: TX: got TXDW interrupt\n",
   7612 			    device_xname(sc->sc_dev)));
   7613 			WM_Q_EVCNT_INCR(txq, txdw);
   7614 		}
   7615 #endif
   7616 		wm_txeof(sc, txq);
   7617 
   7618 		mutex_exit(txq->txq_lock);
   7619 		WM_CORE_LOCK(sc);
   7620 
   7621 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7622 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7623 			wm_linkintr(sc, icr);
   7624 		}
   7625 
   7626 		WM_CORE_UNLOCK(sc);
   7627 
   7628 		if (icr & ICR_RXO) {
   7629 #if defined(WM_DEBUG)
   7630 			log(LOG_WARNING, "%s: Receive overrun\n",
   7631 			    device_xname(sc->sc_dev));
   7632 #endif /* defined(WM_DEBUG) */
   7633 		}
   7634 	}
   7635 
   7636 	rnd_add_uint32(&sc->rnd_source, rndval);
   7637 
   7638 	if (handled) {
   7639 		/* Try to get more packets going. */
   7640 		ifp->if_start(ifp);
   7641 	}
   7642 
   7643 	return handled;
   7644 }
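
/*
 * Illustrative sketch (not part of the driver): the loop shape of
 * wm_intr_legacy() above.  On this chip family a read of ICR
 * acknowledges (clears) the asserted causes, so the handler keeps
 * re-reading until none of the causes it cares about remain set.
 * Names are hypothetical.
 */
#if 0
static int
isr_loop(uint32_t (*read_icr)(void), uint32_t interesting,
    void (*service)(uint32_t))
{
	uint32_t icr;
	int handled = 0;

	for (;;) {
		icr = read_icr();		/* read-to-clear */
		if ((icr & interesting) == 0)
			break;
		handled = 1;
		service(icr);
	}
	return handled;
}
#endif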
   7645 
   7646 static int
   7647 wm_txrxintr_msix(void *arg)
   7648 {
   7649 	struct wm_queue *wmq = arg;
   7650 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7651 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7652 	struct wm_softc *sc = txq->txq_sc;
   7653 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7654 
   7655 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7656 
   7657 	DPRINTF(WM_DEBUG_TX,
   7658 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7659 
   7660 	if (sc->sc_type == WM_T_82574)
   7661 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7662 	else if (sc->sc_type == WM_T_82575)
   7663 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7664 	else
   7665 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   7666 
   7667 	if (!sc->sc_stopping) {
   7668 		mutex_enter(txq->txq_lock);
   7669 
   7670 		WM_Q_EVCNT_INCR(txq, txdw);
   7671 		wm_txeof(sc, txq);
   7672 
   7673 		/* Try to get more packets going. */
   7674 		if (pcq_peek(txq->txq_interq) != NULL)
   7675 			wm_nq_transmit_locked(ifp, txq);
   7676 		/*
    7677 		 * There is still some upper-layer processing which calls
    7678 		 * ifp->if_start(), e.g. ALTQ.
   7679 		 */
   7680 		if (wmq->wmq_id == 0) {
   7681 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7682 				wm_nq_start_locked(ifp);
   7683 		}
   7684 		mutex_exit(txq->txq_lock);
   7685 	}
   7686 
   7687 	DPRINTF(WM_DEBUG_RX,
   7688 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7689 
   7690 	if (!sc->sc_stopping) {
   7691 		mutex_enter(rxq->rxq_lock);
   7692 		WM_Q_EVCNT_INCR(rxq, rxintr);
   7693 		wm_rxeof(rxq);
   7694 		mutex_exit(rxq->rxq_lock);
   7695 	}
   7696 
   7697 	if (sc->sc_type == WM_T_82574)
   7698 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7699 	else if (sc->sc_type == WM_T_82575)
   7700 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7701 	else
   7702 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7703 
   7704 	return 1;
   7705 }
   7706 
   7707 /*
   7708  * wm_linkintr_msix:
   7709  *
   7710  *	Interrupt service routine for link status change for MSI-X.
   7711  */
   7712 static int
   7713 wm_linkintr_msix(void *arg)
   7714 {
   7715 	struct wm_softc *sc = arg;
   7716 	uint32_t reg;
   7717 
   7718 	DPRINTF(WM_DEBUG_LINK,
   7719 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7720 
   7721 	reg = CSR_READ(sc, WMREG_ICR);
   7722 	WM_CORE_LOCK(sc);
   7723 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7724 		goto out;
   7725 
   7726 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7727 	wm_linkintr(sc, ICR_LSC);
   7728 
   7729 out:
   7730 	WM_CORE_UNLOCK(sc);
   7731 
   7732 	if (sc->sc_type == WM_T_82574)
   7733 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7734 	else if (sc->sc_type == WM_T_82575)
   7735 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7736 	else
   7737 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7738 
   7739 	return 1;
   7740 }
   7741 
   7742 /*
   7743  * Media related.
   7744  * GMII, SGMII, TBI (and SERDES)
   7745  */
   7746 
   7747 /* Common */
   7748 
   7749 /*
   7750  * wm_tbi_serdes_set_linkled:
   7751  *
   7752  *	Update the link LED on TBI and SERDES devices.
   7753  */
   7754 static void
   7755 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7756 {
   7757 
   7758 	if (sc->sc_tbi_linkup)
   7759 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7760 	else
   7761 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7762 
    7763 	/* On 82540 or newer devices the LED pin is active low */
   7764 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7765 
   7766 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7767 }
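
/*
 * Illustrative sketch (not part of the driver): the polarity handling
 * in wm_tbi_serdes_set_linkled() above.  XOR-ing the pin bit inverts
 * it only on active-low parts, so a single expression serves both
 * polarities.  Names are hypothetical.
 */
#if 0
static uint32_t
set_led(uint32_t ctrl, uint32_t pin, int link_up, int active_low)
{

	if (link_up)
		ctrl |= pin;
	else
		ctrl &= ~pin;
	if (active_low)
		ctrl ^= pin;		/* invert: driving low lights it */
	return ctrl;
}
#endif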
   7768 
   7769 /* GMII related */
   7770 
   7771 /*
   7772  * wm_gmii_reset:
   7773  *
   7774  *	Reset the PHY.
   7775  */
   7776 static void
   7777 wm_gmii_reset(struct wm_softc *sc)
   7778 {
   7779 	uint32_t reg;
   7780 	int rv;
   7781 
   7782 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7783 		device_xname(sc->sc_dev), __func__));
   7784 
   7785 	/* Get phy semaphore */
   7786 	switch (sc->sc_type) {
   7787 	case WM_T_82571:
   7788 	case WM_T_82572:
   7789 	case WM_T_82573:
   7790 	case WM_T_82574:
   7791 	case WM_T_82583:
   7792 		 /* XXX should get sw semaphore, too */
   7793 		rv = wm_get_swsm_semaphore(sc);
   7794 		break;
   7795 	case WM_T_82575:
   7796 	case WM_T_82576:
   7797 	case WM_T_82580:
   7798 	case WM_T_I350:
   7799 	case WM_T_I354:
   7800 	case WM_T_I210:
   7801 	case WM_T_I211:
   7802 	case WM_T_80003:
   7803 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7804 		break;
   7805 	case WM_T_ICH8:
   7806 	case WM_T_ICH9:
   7807 	case WM_T_ICH10:
   7808 	case WM_T_PCH:
   7809 	case WM_T_PCH2:
   7810 	case WM_T_PCH_LPT:
   7811 	case WM_T_PCH_SPT:
   7812 		rv = wm_get_swfwhw_semaphore(sc);
   7813 		break;
   7814 	default:
    7815 		/* nothing to do */
   7816 		rv = 0;
   7817 		break;
   7818 	}
   7819 	if (rv != 0) {
   7820 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7821 		    __func__);
   7822 		return;
   7823 	}
   7824 
   7825 	switch (sc->sc_type) {
   7826 	case WM_T_82542_2_0:
   7827 	case WM_T_82542_2_1:
   7828 		/* null */
   7829 		break;
   7830 	case WM_T_82543:
   7831 		/*
   7832 		 * With 82543, we need to force speed and duplex on the MAC
   7833 		 * equal to what the PHY speed and duplex configuration is.
   7834 		 * In addition, we need to perform a hardware reset on the PHY
   7835 		 * to take it out of reset.
   7836 		 */
   7837 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7838 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7839 
   7840 		/* The PHY reset pin is active-low. */
   7841 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7842 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7843 		    CTRL_EXT_SWDPIN(4));
   7844 		reg |= CTRL_EXT_SWDPIO(4);
   7845 
   7846 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7847 		CSR_WRITE_FLUSH(sc);
   7848 		delay(10*1000);
   7849 
   7850 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7851 		CSR_WRITE_FLUSH(sc);
   7852 		delay(150);
   7853 #if 0
   7854 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7855 #endif
   7856 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7857 		break;
   7858 	case WM_T_82544:	/* reset 10000us */
   7859 	case WM_T_82540:
   7860 	case WM_T_82545:
   7861 	case WM_T_82545_3:
   7862 	case WM_T_82546:
   7863 	case WM_T_82546_3:
   7864 	case WM_T_82541:
   7865 	case WM_T_82541_2:
   7866 	case WM_T_82547:
   7867 	case WM_T_82547_2:
   7868 	case WM_T_82571:	/* reset 100us */
   7869 	case WM_T_82572:
   7870 	case WM_T_82573:
   7871 	case WM_T_82574:
   7872 	case WM_T_82575:
   7873 	case WM_T_82576:
   7874 	case WM_T_82580:
   7875 	case WM_T_I350:
   7876 	case WM_T_I354:
   7877 	case WM_T_I210:
   7878 	case WM_T_I211:
   7879 	case WM_T_82583:
   7880 	case WM_T_80003:
   7881 		/* generic reset */
   7882 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7883 		CSR_WRITE_FLUSH(sc);
   7884 		delay(20000);
   7885 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7886 		CSR_WRITE_FLUSH(sc);
   7887 		delay(20000);
   7888 
   7889 		if ((sc->sc_type == WM_T_82541)
   7890 		    || (sc->sc_type == WM_T_82541_2)
   7891 		    || (sc->sc_type == WM_T_82547)
   7892 		    || (sc->sc_type == WM_T_82547_2)) {
    7893 			/* Workarounds for IGP are done in igp_reset() */
   7894 			/* XXX add code to set LED after phy reset */
   7895 		}
   7896 		break;
   7897 	case WM_T_ICH8:
   7898 	case WM_T_ICH9:
   7899 	case WM_T_ICH10:
   7900 	case WM_T_PCH:
   7901 	case WM_T_PCH2:
   7902 	case WM_T_PCH_LPT:
   7903 	case WM_T_PCH_SPT:
   7904 		/* generic reset */
   7905 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7906 		CSR_WRITE_FLUSH(sc);
   7907 		delay(100);
   7908 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7909 		CSR_WRITE_FLUSH(sc);
   7910 		delay(150);
   7911 		break;
   7912 	default:
   7913 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7914 		    __func__);
   7915 		break;
   7916 	}
   7917 
   7918 	/* release PHY semaphore */
   7919 	switch (sc->sc_type) {
   7920 	case WM_T_82571:
   7921 	case WM_T_82572:
   7922 	case WM_T_82573:
   7923 	case WM_T_82574:
   7924 	case WM_T_82583:
   7925 		 /* XXX should put sw semaphore, too */
   7926 		wm_put_swsm_semaphore(sc);
   7927 		break;
   7928 	case WM_T_82575:
   7929 	case WM_T_82576:
   7930 	case WM_T_82580:
   7931 	case WM_T_I350:
   7932 	case WM_T_I354:
   7933 	case WM_T_I210:
   7934 	case WM_T_I211:
   7935 	case WM_T_80003:
   7936 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7937 		break;
   7938 	case WM_T_ICH8:
   7939 	case WM_T_ICH9:
   7940 	case WM_T_ICH10:
   7941 	case WM_T_PCH:
   7942 	case WM_T_PCH2:
   7943 	case WM_T_PCH_LPT:
   7944 	case WM_T_PCH_SPT:
   7945 		wm_put_swfwhw_semaphore(sc);
   7946 		break;
   7947 	default:
   7948 		/* nothing to do */
   7949 		rv = 0;
   7950 		break;
   7951 	}
   7952 
   7953 	/* get_cfg_done */
   7954 	wm_get_cfg_done(sc);
   7955 
   7956 	/* extra setup */
   7957 	switch (sc->sc_type) {
   7958 	case WM_T_82542_2_0:
   7959 	case WM_T_82542_2_1:
   7960 	case WM_T_82543:
   7961 	case WM_T_82544:
   7962 	case WM_T_82540:
   7963 	case WM_T_82545:
   7964 	case WM_T_82545_3:
   7965 	case WM_T_82546:
   7966 	case WM_T_82546_3:
   7967 	case WM_T_82541_2:
   7968 	case WM_T_82547_2:
   7969 	case WM_T_82571:
   7970 	case WM_T_82572:
   7971 	case WM_T_82573:
   7972 	case WM_T_82575:
   7973 	case WM_T_82576:
   7974 	case WM_T_82580:
   7975 	case WM_T_I350:
   7976 	case WM_T_I354:
   7977 	case WM_T_I210:
   7978 	case WM_T_I211:
   7979 	case WM_T_80003:
   7980 		/* null */
   7981 		break;
   7982 	case WM_T_82574:
   7983 	case WM_T_82583:
   7984 		wm_lplu_d0_disable(sc);
   7985 		break;
   7986 	case WM_T_82541:
   7987 	case WM_T_82547:
    7988 		/* XXX Configure activity LED after PHY reset */
   7989 		break;
   7990 	case WM_T_ICH8:
   7991 	case WM_T_ICH9:
   7992 	case WM_T_ICH10:
   7993 	case WM_T_PCH:
   7994 	case WM_T_PCH2:
   7995 	case WM_T_PCH_LPT:
   7996 	case WM_T_PCH_SPT:
    7997 		/* Allow time for h/w to get to a quiescent state after reset */
   7998 		delay(10*1000);
   7999 
   8000 		if (sc->sc_type == WM_T_PCH)
   8001 			wm_hv_phy_workaround_ich8lan(sc);
   8002 
   8003 		if (sc->sc_type == WM_T_PCH2)
   8004 			wm_lv_phy_workaround_ich8lan(sc);
   8005 
   8006 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   8007 			/*
    8008 			 * Dummy read to clear the PHY wakeup bit after LCD
    8009 			 * reset
   8010 			 */
   8011 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   8012 		}
   8013 
   8014 		/*
   8015 		 * XXX Configure the LCD with the extended configuration region
   8016 		 * in NVM
   8017 		 */
   8018 
   8019 		/* Disable D0 LPLU. */
   8020 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8021 			wm_lplu_d0_disable_pch(sc);
   8022 		else
   8023 			wm_lplu_d0_disable(sc);	/* ICH* */
   8024 		break;
   8025 	default:
   8026 		panic("%s: unknown type\n", __func__);
   8027 		break;
   8028 	}
   8029 }
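
        /*
         * A hedged summary of the flow above: acquire the PHY semaphore,
         * assert and deassert the PHY reset (chip-specific, or via
         * CTRL_PHY_RESET for the ICH/PCH variants), release the semaphore,
         * wait for configuration to complete (wm_get_cfg_done()), then do
         * any per-chip extra setup such as disabling D0 LPLU.
         */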
   8030 
   8031 /*
   8032  * wm_get_phy_id_82575:
   8033  *
   8034  * Return the PHY ID, or -1 on failure.
   8035  */
   8036 static int
   8037 wm_get_phy_id_82575(struct wm_softc *sc)
   8038 {
   8039 	uint32_t reg;
   8040 	int phyid = -1;
   8041 
   8042 	/* XXX */
   8043 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8044 		return -1;
   8045 
   8046 	if (wm_sgmii_uses_mdio(sc)) {
   8047 		switch (sc->sc_type) {
   8048 		case WM_T_82575:
   8049 		case WM_T_82576:
   8050 			reg = CSR_READ(sc, WMREG_MDIC);
   8051 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8052 			break;
   8053 		case WM_T_82580:
   8054 		case WM_T_I350:
   8055 		case WM_T_I354:
   8056 		case WM_T_I210:
   8057 		case WM_T_I211:
   8058 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8059 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8060 			break;
   8061 		default:
   8062 			return -1;
   8063 		}
   8064 	}
   8065 
   8066 	return phyid;
   8067 }
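
        /*
         * Usage sketch, mirroring the caller in wm_gmii_mediainit() below:
         *
         *	id = wm_get_phy_id_82575(sc);
         *	if (id != -1)
         *		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, id,
         *		    MII_OFFSET_ANY, MIIF_DOPAUSE);
         */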
   8068 
   8069 
   8070 /*
   8071  * wm_gmii_mediainit:
   8072  *
   8073  *	Initialize media for use on 1000BASE-T devices.
   8074  */
   8075 static void
   8076 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8077 {
   8078 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8079 	struct mii_data *mii = &sc->sc_mii;
   8080 	uint32_t reg;
   8081 
   8082 	/* We have GMII. */
   8083 	sc->sc_flags |= WM_F_HAS_MII;
   8084 
   8085 	if (sc->sc_type == WM_T_80003)
   8086 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8087 	else
   8088 		sc->sc_tipg = TIPG_1000T_DFLT;
   8089 
   8090 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8091 	if ((sc->sc_type == WM_T_82580)
   8092 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8093 	    || (sc->sc_type == WM_T_I211)) {
   8094 		reg = CSR_READ(sc, WMREG_PHPM);
   8095 		reg &= ~PHPM_GO_LINK_D;
   8096 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8097 	}
   8098 
   8099 	/*
   8100 	 * Let the chip set speed/duplex on its own based on
   8101 	 * signals from the PHY.
   8102 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8103 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8104 	 */
   8105 	sc->sc_ctrl |= CTRL_SLU;
   8106 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8107 
   8108 	/* Initialize our media structures and probe the GMII. */
   8109 	mii->mii_ifp = ifp;
   8110 
   8111 	/*
   8112 	 * Determine the PHY access method.
   8113 	 *
   8114 	 *  For SGMII, use the SGMII-specific method.
   8115 	 *
   8116 	 *  For some devices, we can determine the PHY access method
   8117 	 * from sc_type.
   8118 	 *
   8119 	 *  For ICH and PCH variants, it's difficult to determine the PHY
   8120 	 * access method from sc_type alone, so use the PCI product ID for
   8121 	 * some devices.
   8122 	 * For other ICH8 variants, try igp's method first. If the PHY
   8123 	 * can't be detected, fall back to bm's method.
   8124 	 */
   8125 	switch (prodid) {
   8126 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8127 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8128 		/* 82577 */
   8129 		sc->sc_phytype = WMPHY_82577;
   8130 		break;
   8131 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8132 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8133 		/* 82578 */
   8134 		sc->sc_phytype = WMPHY_82578;
   8135 		break;
   8136 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8137 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8138 		/* 82579 */
   8139 		sc->sc_phytype = WMPHY_82579;
   8140 		break;
   8141 	case PCI_PRODUCT_INTEL_82801I_BM:
   8142 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8143 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8144 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8145 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8146 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8147 		/* 82567 */
   8148 		sc->sc_phytype = WMPHY_BM;
   8149 		mii->mii_readreg = wm_gmii_bm_readreg;
   8150 		mii->mii_writereg = wm_gmii_bm_writereg;
   8151 		break;
   8152 	default:
   8153 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8154 		    && !wm_sgmii_uses_mdio(sc)) {
   8155 			/* SGMII */
   8156 			mii->mii_readreg = wm_sgmii_readreg;
   8157 			mii->mii_writereg = wm_sgmii_writereg;
   8158 		} else if (sc->sc_type >= WM_T_80003) {
   8159 			/* 80003 */
   8160 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8161 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8162 		} else if (sc->sc_type >= WM_T_I210) {
   8163 			/* I210 and I211 */
   8164 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8165 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8166 		} else if (sc->sc_type >= WM_T_82580) {
   8167 			/* 82580, I350 and I354 */
   8168 			sc->sc_phytype = WMPHY_82580;
   8169 			mii->mii_readreg = wm_gmii_82580_readreg;
   8170 			mii->mii_writereg = wm_gmii_82580_writereg;
   8171 		} else if (sc->sc_type >= WM_T_82544) {
   8172 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8173 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8174 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8175 		} else {
   8176 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8177 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8178 		}
   8179 		break;
   8180 	}
   8181 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8182 		/* All PCH* use _hv_ */
   8183 		mii->mii_readreg = wm_gmii_hv_readreg;
   8184 		mii->mii_writereg = wm_gmii_hv_writereg;
   8185 	}
   8186 	mii->mii_statchg = wm_gmii_statchg;
   8187 
   8188 	wm_gmii_reset(sc);
   8189 
   8190 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8191 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8192 	    wm_gmii_mediastatus);
   8193 
   8194 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8195 	    || (sc->sc_type == WM_T_82580)
   8196 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8197 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8198 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8199 			/* Attach only one port */
   8200 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8201 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8202 		} else {
   8203 			int i, id;
   8204 			uint32_t ctrl_ext;
   8205 
   8206 			id = wm_get_phy_id_82575(sc);
   8207 			if (id != -1) {
   8208 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8209 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8210 			}
   8211 			if ((id == -1)
   8212 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8213 				/* Power on sgmii phy if it is disabled */
   8214 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8215 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8216 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8217 				CSR_WRITE_FLUSH(sc);
   8218 				delay(300*1000); /* XXX too long */
   8219 
   8220 				/* Try PHY addresses 1 through 7 */
   8221 				for (i = 1; i < 8; i++)
   8222 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8223 					    0xffffffff, i, MII_OFFSET_ANY,
   8224 					    MIIF_DOPAUSE);
   8225 
   8226 				/* restore previous sfp cage power state */
   8227 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8228 			}
   8229 		}
   8230 	} else {
   8231 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8232 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8233 	}
   8234 
   8235 	/*
   8236 	 * If the MAC is PCH2 or PCH_LPT and failed to detect an MII PHY, call
   8237 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   8238 	 */
   8239 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8240 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8241 		wm_set_mdio_slow_mode_hv(sc);
   8242 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8243 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8244 	}
   8245 
   8246 	/*
   8247 	 * (For ICH8 variants)
   8248 	 * If PHY detection failed, use BM's r/w function and retry.
   8249 	 */
   8250 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8251 		/* if failed, retry with *_bm_* */
   8252 		mii->mii_readreg = wm_gmii_bm_readreg;
   8253 		mii->mii_writereg = wm_gmii_bm_writereg;
   8254 
   8255 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8256 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8257 	}
   8258 
   8259 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8260 		/* No PHY was found */
   8261 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8262 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8263 		sc->sc_phytype = WMPHY_NONE;
   8264 	} else {
   8265 		/*
   8266 		 * PHY Found!
   8267 		 * Check PHY type.
   8268 		 */
   8269 		uint32_t model;
   8270 		struct mii_softc *child;
   8271 
   8272 		child = LIST_FIRST(&mii->mii_phys);
   8273 		model = child->mii_mpd_model;
   8274 		if (model == MII_MODEL_yyINTEL_I82566)
   8275 			sc->sc_phytype = WMPHY_IGP_3;
   8276 
   8277 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8278 	}
   8279 }
   8280 
   8281 /*
   8282  * wm_gmii_mediachange:	[ifmedia interface function]
   8283  *
   8284  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8285  */
   8286 static int
   8287 wm_gmii_mediachange(struct ifnet *ifp)
   8288 {
   8289 	struct wm_softc *sc = ifp->if_softc;
   8290 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8291 	int rc;
   8292 
   8293 	if ((ifp->if_flags & IFF_UP) == 0)
   8294 		return 0;
   8295 
   8296 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8297 	sc->sc_ctrl |= CTRL_SLU;
   8298 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8299 	    || (sc->sc_type > WM_T_82543)) {
   8300 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8301 	} else {
   8302 		sc->sc_ctrl &= ~CTRL_ASDE;
   8303 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8304 		if (ife->ifm_media & IFM_FDX)
   8305 			sc->sc_ctrl |= CTRL_FD;
   8306 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8307 		case IFM_10_T:
   8308 			sc->sc_ctrl |= CTRL_SPEED_10;
   8309 			break;
   8310 		case IFM_100_TX:
   8311 			sc->sc_ctrl |= CTRL_SPEED_100;
   8312 			break;
   8313 		case IFM_1000_T:
   8314 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8315 			break;
   8316 		default:
   8317 			panic("wm_gmii_mediachange: bad media 0x%x",
   8318 			    ife->ifm_media);
   8319 		}
   8320 	}
   8321 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8322 	if (sc->sc_type <= WM_T_82543)
   8323 		wm_gmii_reset(sc);
   8324 
   8325 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8326 		return 0;
   8327 	return rc;
   8328 }
   8329 
   8330 /*
   8331  * wm_gmii_mediastatus:	[ifmedia interface function]
   8332  *
   8333  *	Get the current interface media status on a 1000BASE-T device.
   8334  */
   8335 static void
   8336 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8337 {
   8338 	struct wm_softc *sc = ifp->if_softc;
   8339 
   8340 	ether_mediastatus(ifp, ifmr);
   8341 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8342 	    | sc->sc_flowflags;
   8343 }
   8344 
   8345 #define	MDI_IO		CTRL_SWDPIN(2)
   8346 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8347 #define	MDI_CLK		CTRL_SWDPIN(3)
   8348 
   8349 static void
   8350 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8351 {
   8352 	uint32_t i, v;
   8353 
   8354 	v = CSR_READ(sc, WMREG_CTRL);
   8355 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8356 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8357 
   8358 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8359 		if (data & i)
   8360 			v |= MDI_IO;
   8361 		else
   8362 			v &= ~MDI_IO;
   8363 		CSR_WRITE(sc, WMREG_CTRL, v);
   8364 		CSR_WRITE_FLUSH(sc);
   8365 		delay(10);
   8366 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8367 		CSR_WRITE_FLUSH(sc);
   8368 		delay(10);
   8369 		CSR_WRITE(sc, WMREG_CTRL, v);
   8370 		CSR_WRITE_FLUSH(sc);
   8371 		delay(10);
   8372 	}
   8373 }
   8374 
   8375 static uint32_t
   8376 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8377 {
   8378 	uint32_t v, i, data = 0;
   8379 
   8380 	v = CSR_READ(sc, WMREG_CTRL);
   8381 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8382 	v |= CTRL_SWDPIO(3);
   8383 
   8384 	CSR_WRITE(sc, WMREG_CTRL, v);
   8385 	CSR_WRITE_FLUSH(sc);
   8386 	delay(10);
   8387 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8388 	CSR_WRITE_FLUSH(sc);
   8389 	delay(10);
   8390 	CSR_WRITE(sc, WMREG_CTRL, v);
   8391 	CSR_WRITE_FLUSH(sc);
   8392 	delay(10);
   8393 
   8394 	for (i = 0; i < 16; i++) {
   8395 		data <<= 1;
   8396 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8397 		CSR_WRITE_FLUSH(sc);
   8398 		delay(10);
   8399 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8400 			data |= 1;
   8401 		CSR_WRITE(sc, WMREG_CTRL, v);
   8402 		CSR_WRITE_FLUSH(sc);
   8403 		delay(10);
   8404 	}
   8405 
   8406 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8407 	CSR_WRITE_FLUSH(sc);
   8408 	delay(10);
   8409 	CSR_WRITE(sc, WMREG_CTRL, v);
   8410 	CSR_WRITE_FLUSH(sc);
   8411 	delay(10);
   8412 
   8413 	return data;
   8414 }
   8415 
   8416 #undef MDI_IO
   8417 #undef MDI_DIR
   8418 #undef MDI_CLK
   8419 
   8420 /*
   8421  * wm_gmii_i82543_readreg:	[mii interface function]
   8422  *
   8423  *	Read a PHY register on the GMII (i82543 version).
   8424  */
   8425 static int
   8426 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8427 {
   8428 	struct wm_softc *sc = device_private(self);
   8429 	int rv;
   8430 
   8431 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8432 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8433 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8434 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8435 
   8436 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8437 	    device_xname(sc->sc_dev), phy, reg, rv));
   8438 
   8439 	return rv;
   8440 }
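
        /*
         * For illustration: assuming the usual clause 22 encodings
         * (MII_COMMAND_START == 0x1, MII_COMMAND_READ == 0x2), the 14-bit
         * read frame sent above for phy 1, reg 2 would be
         *
         *	2 | (1 << 5) | (2 << 10) | (1 << 12) = 0x1822
         *
         * i.e. start bits, read opcode, PHY address, then register address,
         * shifted out MSB first by wm_i82543_mii_sendbits().
         */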
   8441 
   8442 /*
   8443  * wm_gmii_i82543_writereg:	[mii interface function]
   8444  *
   8445  *	Write a PHY register on the GMII (i82543 version).
   8446  */
   8447 static void
   8448 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8449 {
   8450 	struct wm_softc *sc = device_private(self);
   8451 
   8452 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8453 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8454 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8455 	    (MII_COMMAND_START << 30), 32);
   8456 }
   8457 
   8458 /*
   8459  * wm_gmii_i82544_readreg:	[mii interface function]
   8460  *
   8461  *	Read a PHY register on the GMII.
   8462  */
   8463 static int
   8464 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8465 {
   8466 	struct wm_softc *sc = device_private(self);
   8467 	uint32_t mdic = 0;
   8468 	int i, rv;
   8469 
   8470 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8471 	    MDIC_REGADD(reg));
   8472 
   8473 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8474 		mdic = CSR_READ(sc, WMREG_MDIC);
   8475 		if (mdic & MDIC_READY)
   8476 			break;
   8477 		delay(50);
   8478 	}
   8479 
   8480 	if ((mdic & MDIC_READY) == 0) {
   8481 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8482 		    device_xname(sc->sc_dev), phy, reg);
   8483 		rv = 0;
   8484 	} else if (mdic & MDIC_E) {
   8485 #if 0 /* This is normal if no PHY is present. */
   8486 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8487 		    device_xname(sc->sc_dev), phy, reg);
   8488 #endif
   8489 		rv = 0;
   8490 	} else {
   8491 		rv = MDIC_DATA(mdic);
   8492 		if (rv == 0xffff)
   8493 			rv = 0;
   8494 	}
   8495 
   8496 	return rv;
   8497 }
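
        /*
         * Usage sketch (hypothetical; a PHY at address 1 is an assumption):
         *
         *	int bmsr = wm_gmii_i82544_readreg(sc->sc_dev, 1, MII_BMSR);
         *
         * A ready-bit timeout or an MDIC error is reported as 0 above.
         */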
   8498 
   8499 /*
   8500  * wm_gmii_i82544_writereg:	[mii interface function]
   8501  *
   8502  *	Write a PHY register on the GMII.
   8503  */
   8504 static void
   8505 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8506 {
   8507 	struct wm_softc *sc = device_private(self);
   8508 	uint32_t mdic = 0;
   8509 	int i;
   8510 
   8511 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8512 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8513 
   8514 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8515 		mdic = CSR_READ(sc, WMREG_MDIC);
   8516 		if (mdic & MDIC_READY)
   8517 			break;
   8518 		delay(50);
   8519 	}
   8520 
   8521 	if ((mdic & MDIC_READY) == 0)
   8522 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8523 		    device_xname(sc->sc_dev), phy, reg);
   8524 	else if (mdic & MDIC_E)
   8525 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8526 		    device_xname(sc->sc_dev), phy, reg);
   8527 }
   8528 
   8529 /*
   8530  * wm_gmii_i80003_readreg:	[mii interface function]
   8531  *
   8532  *	Read a PHY register on the kumeran bus.
   8533  * This could be handled by the PHY layer if we didn't have to lock the
   8534  * resource ...
   8535  */
   8536 static int
   8537 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8538 {
   8539 	struct wm_softc *sc = device_private(self);
   8540 	int sem;
   8541 	int rv;
   8542 
   8543 	if (phy != 1) /* only one PHY on kumeran bus */
   8544 		return 0;
   8545 
   8546 	sem = swfwphysem[sc->sc_funcid];
   8547 	if (wm_get_swfw_semaphore(sc, sem)) {
   8548 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8549 		    __func__);
   8550 		return 0;
   8551 	}
   8552 
   8553 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8554 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8555 		    reg >> GG82563_PAGE_SHIFT);
   8556 	} else {
   8557 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8558 		    reg >> GG82563_PAGE_SHIFT);
   8559 	}
   8560 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8561 	delay(200);
   8562 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8563 	delay(200);
   8564 
   8565 	wm_put_swfw_semaphore(sc, sem);
   8566 	return rv;
   8567 }
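
        /*
         * Note on the encoding used above: the "reg" argument packs the
         * GG82563 page into its upper bits and the register offset into its
         * low bits, so the page is written to the page-select register first
         * and only (reg & GG82563_MAX_REG_ADDRESS) is used for the access
         * itself.  For example, assuming GG82563_PAGE_SHIFT == 5,
         * reg = (2 << 5) | 0x10 would select page 2, offset 0x10.
         */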
   8568 
   8569 /*
   8570  * wm_gmii_i80003_writereg:	[mii interface function]
   8571  *
   8572  *	Write a PHY register on the kumeran bus.
   8573  * This could be handled by the PHY layer if we didn't have to lock the
   8574  * resource ...
   8575  */
   8576 static void
   8577 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8578 {
   8579 	struct wm_softc *sc = device_private(self);
   8580 	int sem;
   8581 
   8582 	if (phy != 1) /* only one PHY on kumeran bus */
   8583 		return;
   8584 
   8585 	sem = swfwphysem[sc->sc_funcid];
   8586 	if (wm_get_swfw_semaphore(sc, sem)) {
   8587 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8588 		    __func__);
   8589 		return;
   8590 	}
   8591 
   8592 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8593 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8594 		    reg >> GG82563_PAGE_SHIFT);
   8595 	} else {
   8596 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8597 		    reg >> GG82563_PAGE_SHIFT);
   8598 	}
   8599 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8600 	delay(200);
   8601 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8602 	delay(200);
   8603 
   8604 	wm_put_swfw_semaphore(sc, sem);
   8605 }
   8606 
   8607 /*
   8608  * wm_gmii_bm_readreg:	[mii interface function]
   8609  *
   8610  *	Read a PHY register on the BM PHY.
   8611  * This could be handled by the PHY layer if we didn't have to lock the
   8612  * resource ...
   8613  */
   8614 static int
   8615 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8616 {
   8617 	struct wm_softc *sc = device_private(self);
   8618 	int sem;
   8619 	int rv;
   8620 
   8621 	sem = swfwphysem[sc->sc_funcid];
   8622 	if (wm_get_swfw_semaphore(sc, sem)) {
   8623 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8624 		    __func__);
   8625 		return 0;
   8626 	}
   8627 
   8628 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8629 		if (phy == 1)
   8630 			wm_gmii_i82544_writereg(self, phy,
   8631 			    MII_IGPHY_PAGE_SELECT, reg);
   8632 		else
   8633 			wm_gmii_i82544_writereg(self, phy,
   8634 			    GG82563_PHY_PAGE_SELECT,
   8635 			    reg >> GG82563_PAGE_SHIFT);
   8636 	}
   8637 
   8638 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8639 	wm_put_swfw_semaphore(sc, sem);
   8640 	return rv;
   8641 }
   8642 
   8643 /*
   8644  * wm_gmii_bm_writereg:	[mii interface function]
   8645  *
   8646  *	Write a PHY register on the BM PHY.
   8647  * This could be handled by the PHY layer if we didn't have to lock the
   8648  * resource ...
   8649  */
   8650 static void
   8651 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8652 {
   8653 	struct wm_softc *sc = device_private(self);
   8654 	int sem;
   8655 
   8656 	sem = swfwphysem[sc->sc_funcid];
   8657 	if (wm_get_swfw_semaphore(sc, sem)) {
   8658 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8659 		    __func__);
   8660 		return;
   8661 	}
   8662 
   8663 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8664 		if (phy == 1)
   8665 			wm_gmii_i82544_writereg(self, phy,
   8666 			    MII_IGPHY_PAGE_SELECT, reg);
   8667 		else
   8668 			wm_gmii_i82544_writereg(self, phy,
   8669 			    GG82563_PHY_PAGE_SELECT,
   8670 			    reg >> GG82563_PAGE_SHIFT);
   8671 	}
   8672 
   8673 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8674 	wm_put_swfw_semaphore(sc, sem);
   8675 }
   8676 
   8677 static void
   8678 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8679 {
   8680 	struct wm_softc *sc = device_private(self);
   8681 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8682 	uint16_t wuce;
   8683 
   8684 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8685 	if (sc->sc_type == WM_T_PCH) {
   8686 		/* XXX the e1000 driver does nothing... why? */
   8687 	}
   8688 
   8689 	/* Set page 769 */
   8690 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8691 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8692 
   8693 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8694 
   8695 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8696 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8697 	    wuce | BM_WUC_ENABLE_BIT);
   8698 
   8699 	/* Select page 800 */
   8700 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8701 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8702 
   8703 	/* Write page 800 */
   8704 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8705 
   8706 	if (rd)
   8707 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8708 	else
   8709 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8710 
   8711 	/* Set page 769 */
   8712 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8713 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8714 
   8715 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8716 }
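
        /*
         * A minimal usage sketch of the helper above, mirroring the dummy
         * BM_WUC read done after an LCD reset in wm_gmii_reset():
         *
         *	int16_t val;
         *	wm_access_phy_wakeup_reg_bm(sc->sc_dev, BM_WUC, &val, 1);
         *
         * The rd argument selects read (1) or write (0); the page 769/800
         * dance is hidden inside.
         */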
   8717 
   8718 /*
   8719  * wm_gmii_hv_readreg:	[mii interface function]
   8720  *
   8721  *	Read a PHY register on the HV (PCH) PHY.
   8722  * This could be handled by the PHY layer if we didn't have to lock the
   8723  * resource ...
   8724  */
   8725 static int
   8726 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8727 {
   8728 	struct wm_softc *sc = device_private(self);
   8729 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8730 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8731 	uint16_t val;
   8732 	int rv;
   8733 
   8734 	if (wm_get_swfwhw_semaphore(sc)) {
   8735 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8736 		    __func__);
   8737 		return 0;
   8738 	}
   8739 
   8740 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8741 	if (sc->sc_phytype == WMPHY_82577) {
   8742 		/* XXX must write */
   8743 	}
   8744 
   8745 	/* Page 800 works differently from the rest, so it has its own func */
   8746 	if (page == BM_WUC_PAGE) {
   8747 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
        		wm_put_swfwhw_semaphore(sc);
   8748 		return val;
   8749 	}
   8750 
   8751 	/*
   8752 	 * Pages lower than 768 work differently from the rest and would
   8753 	 * need their own function; they are not handled here
   8754 	 */
   8755 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8756 		printf("gmii_hv_readreg!!!\n");
        		wm_put_swfwhw_semaphore(sc);
   8757 		return 0;
   8758 	}
   8759 
   8760 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8761 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8762 		    page << BME1000_PAGE_SHIFT);
   8763 	}
   8764 
   8765 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8766 	wm_put_swfwhw_semaphore(sc);
   8767 	return rv;
   8768 }
   8769 
   8770 /*
   8771  * wm_gmii_hv_writereg:	[mii interface function]
   8772  *
   8773  *	Write a PHY register on the HV (PCH) PHY.
   8774  * This could be handled by the PHY layer if we didn't have to lock the
   8775  * resource ...
   8776  */
   8777 static void
   8778 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8779 {
   8780 	struct wm_softc *sc = device_private(self);
   8781 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8782 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8783 
   8784 	if (wm_get_swfwhw_semaphore(sc)) {
   8785 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8786 		    __func__);
   8787 		return;
   8788 	}
   8789 
   8790 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8791 
   8792 	/* Page 800 works differently from the rest, so it has its own func */
   8793 	if (page == BM_WUC_PAGE) {
   8794 		uint16_t tmp;
   8795 
   8796 		tmp = val;
   8797 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
        		wm_put_swfwhw_semaphore(sc);
   8798 		return;
   8799 	}
   8800 
   8801 	/*
   8802 	 * Pages lower than 768 work differently from the rest and would
   8803 	 * need their own function; they are not handled here
   8804 	 */
   8805 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8806 		printf("gmii_hv_writereg!!!\n");
        		wm_put_swfwhw_semaphore(sc);
   8807 		return;
   8808 	}
   8809 
   8810 	/*
   8811 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8812 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8813 	 */
   8814 
   8815 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8816 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8817 		    page << BME1000_PAGE_SHIFT);
   8818 	}
   8819 
   8820 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8821 	wm_put_swfwhw_semaphore(sc);
   8822 }
   8823 
   8824 /*
   8825  * wm_gmii_82580_readreg:	[mii interface function]
   8826  *
   8827  *	Read a PHY register on the 82580 and I350.
   8828  * This could be handled by the PHY layer if we didn't have to lock the
   8829  * resource ...
   8830  */
   8831 static int
   8832 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8833 {
   8834 	struct wm_softc *sc = device_private(self);
   8835 	int sem;
   8836 	int rv;
   8837 
   8838 	sem = swfwphysem[sc->sc_funcid];
   8839 	if (wm_get_swfw_semaphore(sc, sem)) {
   8840 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8841 		    __func__);
   8842 		return 0;
   8843 	}
   8844 
   8845 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8846 
   8847 	wm_put_swfw_semaphore(sc, sem);
   8848 	return rv;
   8849 }
   8850 
   8851 /*
   8852  * wm_gmii_82580_writereg:	[mii interface function]
   8853  *
   8854  *	Write a PHY register on the 82580 and I350.
   8855  * This could be handled by the PHY layer if we didn't have to lock the
   8856  * resource ...
   8857  */
   8858 static void
   8859 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8860 {
   8861 	struct wm_softc *sc = device_private(self);
   8862 	int sem;
   8863 
   8864 	sem = swfwphysem[sc->sc_funcid];
   8865 	if (wm_get_swfw_semaphore(sc, sem)) {
   8866 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8867 		    __func__);
   8868 		return;
   8869 	}
   8870 
   8871 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8872 
   8873 	wm_put_swfw_semaphore(sc, sem);
   8874 }
   8875 
   8876 /*
   8877  * wm_gmii_gs40g_readreg:	[mii interface function]
   8878  *
   8879  *	Read a PHY register on the I210 and I211.
   8880  * This could be handled by the PHY layer if we didn't have to lock the
   8881  * resource ...
   8882  */
   8883 static int
   8884 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8885 {
   8886 	struct wm_softc *sc = device_private(self);
   8887 	int sem;
   8888 	int page, offset;
   8889 	int rv;
   8890 
   8891 	/* Acquire semaphore */
   8892 	sem = swfwphysem[sc->sc_funcid];
   8893 	if (wm_get_swfw_semaphore(sc, sem)) {
   8894 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8895 		    __func__);
   8896 		return 0;
   8897 	}
   8898 
   8899 	/* Page select */
   8900 	page = reg >> GS40G_PAGE_SHIFT;
   8901 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8902 
   8903 	/* Read reg */
   8904 	offset = reg & GS40G_OFFSET_MASK;
   8905 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8906 
   8907 	wm_put_swfw_semaphore(sc, sem);
   8908 	return rv;
   8909 }
   8910 
   8911 /*
   8912  * wm_gmii_gs40g_writereg:	[mii interface function]
   8913  *
   8914  *	Write a PHY register on the I210 and I211.
   8915  * This could be handled by the PHY layer if we didn't have to lock the
   8916  * resource ...
   8917  */
   8918 static void
   8919 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8920 {
   8921 	struct wm_softc *sc = device_private(self);
   8922 	int sem;
   8923 	int page, offset;
   8924 
   8925 	/* Acquire semaphore */
   8926 	sem = swfwphysem[sc->sc_funcid];
   8927 	if (wm_get_swfw_semaphore(sc, sem)) {
   8928 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8929 		    __func__);
   8930 		return;
   8931 	}
   8932 
   8933 	/* Page select */
   8934 	page = reg >> GS40G_PAGE_SHIFT;
   8935 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8936 
   8937 	/* Write reg */
   8938 	offset = reg & GS40G_OFFSET_MASK;
   8939 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8940 
   8941 	/* Release semaphore */
   8942 	wm_put_swfw_semaphore(sc, sem);
   8943 }
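
        /*
         * The GS40G encoding used by the two functions above follows the
         * same page-in-the-upper-bits convention: assuming
         * GS40G_PAGE_SHIFT == 16 and GS40G_OFFSET_MASK == 0xffff, a reg
         * value of (2 << 16) | 21 addresses page 2, register 21.
         */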
   8944 
   8945 /*
   8946  * wm_gmii_statchg:	[mii interface function]
   8947  *
   8948  *	Callback from MII layer when media changes.
   8949  */
   8950 static void
   8951 wm_gmii_statchg(struct ifnet *ifp)
   8952 {
   8953 	struct wm_softc *sc = ifp->if_softc;
   8954 	struct mii_data *mii = &sc->sc_mii;
   8955 
   8956 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8957 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8958 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8959 
   8960 	/*
   8961 	 * Get flow control negotiation result.
   8962 	 */
   8963 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8964 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8965 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8966 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8967 	}
   8968 
   8969 	if (sc->sc_flowflags & IFM_FLOW) {
   8970 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8971 			sc->sc_ctrl |= CTRL_TFCE;
   8972 			sc->sc_fcrtl |= FCRTL_XONE;
   8973 		}
   8974 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8975 			sc->sc_ctrl |= CTRL_RFCE;
   8976 	}
   8977 
   8978 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8979 		DPRINTF(WM_DEBUG_LINK,
   8980 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8981 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8982 	} else {
   8983 		DPRINTF(WM_DEBUG_LINK,
   8984 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8985 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8986 	}
   8987 
   8988 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8989 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8990 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8991 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8992 	if (sc->sc_type == WM_T_80003) {
   8993 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8994 		case IFM_1000_T:
   8995 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8996 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8997 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8998 			break;
   8999 		default:
   9000 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9001 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9002 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9003 			break;
   9004 		}
   9005 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9006 	}
   9007 }
   9008 
   9009 /*
   9010  * wm_kmrn_readreg:
   9011  *
   9012  *	Read a kumeran register
   9013  */
   9014 static int
   9015 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9016 {
   9017 	int rv;
   9018 
   9019 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9020 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   9021 			aprint_error_dev(sc->sc_dev,
   9022 			    "%s: failed to get semaphore\n", __func__);
   9023 			return 0;
   9024 		}
   9025 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9026 		if (wm_get_swfwhw_semaphore(sc)) {
   9027 			aprint_error_dev(sc->sc_dev,
   9028 			    "%s: failed to get semaphore\n", __func__);
   9029 			return 0;
   9030 		}
   9031 	}
   9032 
   9033 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9034 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9035 	    KUMCTRLSTA_REN);
   9036 	CSR_WRITE_FLUSH(sc);
   9037 	delay(2);
   9038 
   9039 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9040 
   9041 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9042 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9043 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9044 		wm_put_swfwhw_semaphore(sc);
   9045 
   9046 	return rv;
   9047 }
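
        /*
         * Hedged usage sketch: a read-modify-write of a kumeran register
         * with these helpers (wm_kmrn_writereg() follows below):
         *
         *	reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
         *	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, reg | ...);
         *
         * Each call acquires and releases the semaphore itself, so the
         * pair is not atomic as a whole.
         */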
   9048 
   9049 /*
   9050  * wm_kmrn_writereg:
   9051  *
   9052  *	Write a kumeran register
   9053  */
   9054 static void
   9055 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9056 {
   9057 
   9058 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9059 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   9060 			aprint_error_dev(sc->sc_dev,
   9061 			    "%s: failed to get semaphore\n", __func__);
   9062 			return;
   9063 		}
   9064 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9065 		if (wm_get_swfwhw_semaphore(sc)) {
   9066 			aprint_error_dev(sc->sc_dev,
   9067 			    "%s: failed to get semaphore\n", __func__);
   9068 			return;
   9069 		}
   9070 	}
   9071 
   9072 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9073 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9074 	    (val & KUMCTRLSTA_MASK));
   9075 
   9076 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9077 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9078 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9079 		wm_put_swfwhw_semaphore(sc);
   9080 }
   9081 
   9082 /* SGMII related */
   9083 
   9084 /*
   9085  * wm_sgmii_uses_mdio
   9086  *
   9087  * Check whether the transaction is to the internal PHY or the external
   9088  * MDIO interface. Return true if it's MDIO.
   9089  */
   9090 static bool
   9091 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9092 {
   9093 	uint32_t reg;
   9094 	bool ismdio = false;
   9095 
   9096 	switch (sc->sc_type) {
   9097 	case WM_T_82575:
   9098 	case WM_T_82576:
   9099 		reg = CSR_READ(sc, WMREG_MDIC);
   9100 		ismdio = ((reg & MDIC_DEST) != 0);
   9101 		break;
   9102 	case WM_T_82580:
   9103 	case WM_T_I350:
   9104 	case WM_T_I354:
   9105 	case WM_T_I210:
   9106 	case WM_T_I211:
   9107 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9108 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9109 		break;
   9110 	default:
   9111 		break;
   9112 	}
   9113 
   9114 	return ismdio;
   9115 }
   9116 
   9117 /*
   9118  * wm_sgmii_readreg:	[mii interface function]
   9119  *
   9120  *	Read a PHY register on the SGMII
   9121  * This could be handled by the PHY layer if we didn't have to lock the
   9122  * resource ...
   9123  */
   9124 static int
   9125 wm_sgmii_readreg(device_t self, int phy, int reg)
   9126 {
   9127 	struct wm_softc *sc = device_private(self);
   9128 	uint32_t i2ccmd;
   9129 	int i, rv;
   9130 
   9131 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9132 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9133 		    __func__);
   9134 		return 0;
   9135 	}
   9136 
   9137 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9138 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9139 	    | I2CCMD_OPCODE_READ;
   9140 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9141 
   9142 	/* Poll the ready bit */
   9143 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9144 		delay(50);
   9145 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9146 		if (i2ccmd & I2CCMD_READY)
   9147 			break;
   9148 	}
   9149 	if ((i2ccmd & I2CCMD_READY) == 0)
   9150 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9151 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9152 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9153 
   9154 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9155 
   9156 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9157 	return rv;
   9158 }
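
        /*
         * Byte-order note for the read above: the I2C interface returns the
         * two PHY data bytes swapped relative to what the MII layer expects,
         * so if the I2CCMD data field reads back 0x3412 the swap yields
         * 0x1234.  wm_sgmii_writereg() below performs the mirror-image swap
         * on output.
         */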
   9159 
   9160 /*
   9161  * wm_sgmii_writereg:	[mii interface function]
   9162  *
   9163  *	Write a PHY register on the SGMII.
   9164  * This could be handled by the PHY layer if we didn't have to lock the
   9165  * resource ...
   9166  */
   9167 static void
   9168 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9169 {
   9170 	struct wm_softc *sc = device_private(self);
   9171 	uint32_t i2ccmd;
   9172 	int i;
   9173 	int val_swapped;
   9174 
   9175 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9176 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9177 		    __func__);
   9178 		return;
   9179 	}
   9180 	/* Swap the data bytes for the I2C interface */
   9181 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9182 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9183 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9184 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9185 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9186 
   9187 	/* Poll the ready bit */
   9188 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9189 		delay(50);
   9190 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9191 		if (i2ccmd & I2CCMD_READY)
   9192 			break;
   9193 	}
   9194 	if ((i2ccmd & I2CCMD_READY) == 0)
   9195 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9196 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9197 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9198 
   9199 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9200 }
   9201 
   9202 /* TBI related */
   9203 
   9204 /*
   9205  * wm_tbi_mediainit:
   9206  *
   9207  *	Initialize media for use on 1000BASE-X devices.
   9208  */
   9209 static void
   9210 wm_tbi_mediainit(struct wm_softc *sc)
   9211 {
   9212 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9213 	const char *sep = "";
   9214 
   9215 	if (sc->sc_type < WM_T_82543)
   9216 		sc->sc_tipg = TIPG_WM_DFLT;
   9217 	else
   9218 		sc->sc_tipg = TIPG_LG_DFLT;
   9219 
   9220 	sc->sc_tbi_serdes_anegticks = 5;
   9221 
   9222 	/* Initialize our media structures */
   9223 	sc->sc_mii.mii_ifp = ifp;
   9224 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9225 
   9226 	if ((sc->sc_type >= WM_T_82575)
   9227 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9228 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9229 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9230 	else
   9231 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9232 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9233 
   9234 	/*
   9235 	 * SWD Pins:
   9236 	 *
   9237 	 *	0 = Link LED (output)
   9238 	 *	1 = Loss Of Signal (input)
   9239 	 */
   9240 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9241 
   9242 	/* XXX Perhaps this is only for TBI */
   9243 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9244 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9245 
   9246 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9247 		sc->sc_ctrl &= ~CTRL_LRST;
   9248 
   9249 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9250 
   9251 #define	ADD(ss, mm, dd)							\
   9252 do {									\
   9253 	aprint_normal("%s%s", sep, ss);					\
   9254 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9255 	sep = ", ";							\
   9256 } while (/*CONSTCOND*/0)
   9257 
   9258 	aprint_normal_dev(sc->sc_dev, "");
   9259 
   9260 	/* Only 82545 is LX */
   9261 	if (sc->sc_type == WM_T_82545) {
   9262 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9263 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9264 	} else {
   9265 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9266 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9267 	}
   9268 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9269 	aprint_normal("\n");
   9270 
   9271 #undef ADD
   9272 
   9273 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9274 }
   9275 
   9276 /*
   9277  * wm_tbi_mediachange:	[ifmedia interface function]
   9278  *
   9279  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9280  */
   9281 static int
   9282 wm_tbi_mediachange(struct ifnet *ifp)
   9283 {
   9284 	struct wm_softc *sc = ifp->if_softc;
   9285 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9286 	uint32_t status;
   9287 	int i;
   9288 
   9289 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9290 		/* XXX need some work for >= 82571 and < 82575 */
   9291 		if (sc->sc_type < WM_T_82575)
   9292 			return 0;
   9293 	}
   9294 
   9295 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9296 	    || (sc->sc_type >= WM_T_82575))
   9297 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9298 
   9299 	sc->sc_ctrl &= ~CTRL_LRST;
   9300 	sc->sc_txcw = TXCW_ANE;
   9301 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9302 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9303 	else if (ife->ifm_media & IFM_FDX)
   9304 		sc->sc_txcw |= TXCW_FD;
   9305 	else
   9306 		sc->sc_txcw |= TXCW_HD;
   9307 
   9308 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9309 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9310 
   9311 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9312 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9313 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9314 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9315 	CSR_WRITE_FLUSH(sc);
   9316 	delay(1000);
   9317 
   9318 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9319 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9320 
   9321 	/*
   9322 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
   9323 	 * if the optics detect a signal, and clear if they don't.
   9324 	 */
   9325 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9326 		/* Have signal; wait for the link to come up. */
   9327 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9328 			delay(10000);
   9329 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9330 				break;
   9331 		}
   9332 
   9333 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9334 			    device_xname(sc->sc_dev),i));
   9335 
   9336 		status = CSR_READ(sc, WMREG_STATUS);
   9337 		DPRINTF(WM_DEBUG_LINK,
   9338 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9339 			device_xname(sc->sc_dev),status, STATUS_LU));
   9340 		if (status & STATUS_LU) {
   9341 			/* Link is up. */
   9342 			DPRINTF(WM_DEBUG_LINK,
   9343 			    ("%s: LINK: set media -> link up %s\n",
   9344 			    device_xname(sc->sc_dev),
   9345 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9346 
   9347 			/*
   9348 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9349 			 * so we should update sc->sc_ctrl
   9350 			 */
   9351 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9352 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9353 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9354 			if (status & STATUS_FD)
   9355 				sc->sc_tctl |=
   9356 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9357 			else
   9358 				sc->sc_tctl |=
   9359 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9360 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9361 				sc->sc_fcrtl |= FCRTL_XONE;
   9362 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9363 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9364 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9365 				      sc->sc_fcrtl);
   9366 			sc->sc_tbi_linkup = 1;
   9367 		} else {
   9368 			if (i == WM_LINKUP_TIMEOUT)
   9369 				wm_check_for_link(sc);
   9370 			/* Link is down. */
   9371 			DPRINTF(WM_DEBUG_LINK,
   9372 			    ("%s: LINK: set media -> link down\n",
   9373 			    device_xname(sc->sc_dev)));
   9374 			sc->sc_tbi_linkup = 0;
   9375 		}
   9376 	} else {
   9377 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9378 		    device_xname(sc->sc_dev)));
   9379 		sc->sc_tbi_linkup = 0;
   9380 	}
   9381 
   9382 	wm_tbi_serdes_set_linkled(sc);
   9383 
   9384 	return 0;
   9385 }
   9386 
   9387 /*
   9388  * wm_tbi_mediastatus:	[ifmedia interface function]
   9389  *
   9390  *	Get the current interface media status on a 1000BASE-X device.
   9391  */
   9392 static void
   9393 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9394 {
   9395 	struct wm_softc *sc = ifp->if_softc;
   9396 	uint32_t ctrl, status;
   9397 
   9398 	ifmr->ifm_status = IFM_AVALID;
   9399 	ifmr->ifm_active = IFM_ETHER;
   9400 
   9401 	status = CSR_READ(sc, WMREG_STATUS);
   9402 	if ((status & STATUS_LU) == 0) {
   9403 		ifmr->ifm_active |= IFM_NONE;
   9404 		return;
   9405 	}
   9406 
   9407 	ifmr->ifm_status |= IFM_ACTIVE;
   9408 	/* Only 82545 is LX */
   9409 	if (sc->sc_type == WM_T_82545)
   9410 		ifmr->ifm_active |= IFM_1000_LX;
   9411 	else
   9412 		ifmr->ifm_active |= IFM_1000_SX;
   9413 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9414 		ifmr->ifm_active |= IFM_FDX;
   9415 	else
   9416 		ifmr->ifm_active |= IFM_HDX;
   9417 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9418 	if (ctrl & CTRL_RFCE)
   9419 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9420 	if (ctrl & CTRL_TFCE)
   9421 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9422 }
   9423 
   9424 /* XXX TBI only */
   9425 static int
   9426 wm_check_for_link(struct wm_softc *sc)
   9427 {
   9428 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9429 	uint32_t rxcw;
   9430 	uint32_t ctrl;
   9431 	uint32_t status;
   9432 	uint32_t sig;
   9433 
   9434 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9435 		/* XXX need some work for >= 82571 */
   9436 		if (sc->sc_type >= WM_T_82571) {
   9437 			sc->sc_tbi_linkup = 1;
   9438 			return 0;
   9439 		}
   9440 	}
   9441 
   9442 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9443 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9444 	status = CSR_READ(sc, WMREG_STATUS);
   9445 
   9446 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9447 
   9448 	DPRINTF(WM_DEBUG_LINK,
   9449 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9450 		device_xname(sc->sc_dev), __func__,
   9451 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9452 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9453 
   9454 	/*
   9455 	 * SWDPIN   LU RXCW
   9456 	 *      0    0    0
   9457 	 *      0    0    1	(should not happen)
   9458 	 *      0    1    0	(should not happen)
   9459 	 *      0    1    1	(should not happen)
   9460 	 *      1    0    0	Disable autonego and force linkup
   9461 	 *      1    0    1	got /C/ but not linkup yet
   9462 	 *      1    1    0	(linkup)
   9463 	 *      1    1    1	If IFM_AUTO, back to autonego
   9464 	 *
   9465 	 */
   9466 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9467 	    && ((status & STATUS_LU) == 0)
   9468 	    && ((rxcw & RXCW_C) == 0)) {
   9469 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9470 			__func__));
   9471 		sc->sc_tbi_linkup = 0;
   9472 		/* Disable auto-negotiation in the TXCW register */
   9473 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9474 
   9475 		/*
   9476 		 * Force link-up and also force full-duplex.
   9477 		 *
   9478 		 * NOTE: CTRL's TFCE and RFCE were updated automatically,
   9479 		 * so we should update sc->sc_ctrl
   9480 		 */
   9481 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9482 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9483 	} else if (((status & STATUS_LU) != 0)
   9484 	    && ((rxcw & RXCW_C) != 0)
   9485 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9486 		sc->sc_tbi_linkup = 1;
   9487 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9488 			__func__));
   9489 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9490 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9491 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9492 	    && ((rxcw & RXCW_C) != 0)) {
   9493 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9494 	} else {
   9495 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9496 			status));
   9497 	}
   9498 
   9499 	return 0;
   9500 }
   9501 
   9502 /*
   9503  * wm_tbi_tick:
   9504  *
   9505  *	Check the link on TBI devices.
   9506  *	This function acts as mii_tick().
   9507  */
   9508 static void
   9509 wm_tbi_tick(struct wm_softc *sc)
   9510 {
   9511 	struct mii_data *mii = &sc->sc_mii;
   9512 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9513 	uint32_t status;
   9514 
   9515 	KASSERT(WM_CORE_LOCKED(sc));
   9516 
   9517 	status = CSR_READ(sc, WMREG_STATUS);
   9518 
   9519 	/* XXX is this needed? */
   9520 	(void)CSR_READ(sc, WMREG_RXCW);
   9521 	(void)CSR_READ(sc, WMREG_CTRL);
   9522 
   9523 	/* set link status */
   9524 	if ((status & STATUS_LU) == 0) {
   9525 		DPRINTF(WM_DEBUG_LINK,
   9526 		    ("%s: LINK: checklink -> down\n",
   9527 			device_xname(sc->sc_dev)));
   9528 		sc->sc_tbi_linkup = 0;
   9529 	} else if (sc->sc_tbi_linkup == 0) {
   9530 		DPRINTF(WM_DEBUG_LINK,
   9531 		    ("%s: LINK: checklink -> up %s\n",
   9532 			device_xname(sc->sc_dev),
   9533 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9534 		sc->sc_tbi_linkup = 1;
   9535 		sc->sc_tbi_serdes_ticks = 0;
   9536 	}
   9537 
   9538 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9539 		goto setled;
   9540 
   9541 	if ((status & STATUS_LU) == 0) {
   9542 		sc->sc_tbi_linkup = 0;
   9543 		/* If the timer expired, retry autonegotiation */
   9544 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9545 		    && (++sc->sc_tbi_serdes_ticks
   9546 			>= sc->sc_tbi_serdes_anegticks)) {
   9547 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9548 			sc->sc_tbi_serdes_ticks = 0;
   9549 			/*
   9550 			 * Reset the link, and let autonegotiation do
   9551 			 * its thing
   9552 			 */
   9553 			sc->sc_ctrl |= CTRL_LRST;
   9554 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9555 			CSR_WRITE_FLUSH(sc);
   9556 			delay(1000);
   9557 			sc->sc_ctrl &= ~CTRL_LRST;
   9558 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9559 			CSR_WRITE_FLUSH(sc);
   9560 			delay(1000);
   9561 			CSR_WRITE(sc, WMREG_TXCW,
   9562 			    sc->sc_txcw & ~TXCW_ANE);
   9563 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9564 		}
   9565 	}
   9566 
   9567 setled:
   9568 	wm_tbi_serdes_set_linkled(sc);
   9569 }
   9570 
   9571 /* SERDES related */
   9572 static void
   9573 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9574 {
   9575 	uint32_t reg;
   9576 
   9577 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9578 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9579 		return;
   9580 
   9581 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9582 	reg |= PCS_CFG_PCS_EN;
   9583 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9584 
   9585 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9586 	reg &= ~CTRL_EXT_SWDPIN(3);
   9587 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9588 	CSR_WRITE_FLUSH(sc);
   9589 }
   9590 
   9591 static int
   9592 wm_serdes_mediachange(struct ifnet *ifp)
   9593 {
   9594 	struct wm_softc *sc = ifp->if_softc;
   9595 	bool pcs_autoneg = true; /* XXX */
   9596 	uint32_t ctrl_ext, pcs_lctl, reg;
   9597 
   9598 	/* XXX Currently, this function is not called on 8257[12] */
   9599 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9600 	    || (sc->sc_type >= WM_T_82575))
   9601 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9602 
   9603 	wm_serdes_power_up_link_82575(sc);
   9604 
   9605 	sc->sc_ctrl |= CTRL_SLU;
   9606 
   9607 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9608 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9609 
   9610 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9611 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9612 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9613 	case CTRL_EXT_LINK_MODE_SGMII:
   9614 		pcs_autoneg = true;
   9615 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9616 		break;
   9617 	case CTRL_EXT_LINK_MODE_1000KX:
   9618 		pcs_autoneg = false;
   9619 		/* FALLTHROUGH */
   9620 	default:
   9621 		if ((sc->sc_type == WM_T_82575)
   9622 		    || (sc->sc_type == WM_T_82576)) {
   9623 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9624 				pcs_autoneg = false;
   9625 		}
   9626 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9627 		    | CTRL_FRCFDX;
   9628 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9629 	}
   9630 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9631 
   9632 	if (pcs_autoneg) {
   9633 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9634 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9635 
   9636 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9637 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9638 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9639 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9640 	} else
   9641 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9642 
   9643 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   9644 
   9646 	return 0;
   9647 }
   9648 
   9649 static void
   9650 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9651 {
   9652 	struct wm_softc *sc = ifp->if_softc;
   9653 	struct mii_data *mii = &sc->sc_mii;
   9654 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9655 	uint32_t pcs_adv, pcs_lpab, reg;
   9656 
   9657 	ifmr->ifm_status = IFM_AVALID;
   9658 	ifmr->ifm_active = IFM_ETHER;
   9659 
   9660 	/* Check PCS */
   9661 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9662 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9663 		ifmr->ifm_active |= IFM_NONE;
   9664 		sc->sc_tbi_linkup = 0;
   9665 		goto setled;
   9666 	}
   9667 
   9668 	sc->sc_tbi_linkup = 1;
   9669 	ifmr->ifm_status |= IFM_ACTIVE;
   9670 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9671 	if ((reg & PCS_LSTS_FDX) != 0)
   9672 		ifmr->ifm_active |= IFM_FDX;
   9673 	else
   9674 		ifmr->ifm_active |= IFM_HDX;
   9675 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9676 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9677 		/* Check flow */
   9678 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9679 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9680 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9681 			goto setled;
   9682 		}
   9683 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9684 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9685 		DPRINTF(WM_DEBUG_LINK,
   9686 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9687 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9688 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9689 			mii->mii_media_active |= IFM_FLOW
   9690 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9691 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9692 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9693 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9694 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9695 			mii->mii_media_active |= IFM_FLOW
   9696 			    | IFM_ETH_TXPAUSE;
   9697 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9698 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9699 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9700 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9701 			mii->mii_media_active |= IFM_FLOW
   9702 			    | IFM_ETH_RXPAUSE;
   9703 		} else {
   9704 		}
   9705 	}
   9706 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9707 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9708 setled:
   9709 	wm_tbi_serdes_set_linkled(sc);
   9710 }
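
/*
 * Note on the pause resolution above: it follows the usual IEEE 802.3
 * Annex 28B priority rules for combining our advertisement (PCS_ANADV)
 * with the link partner ability (PCS_LPAB).  A rough summary:
 *
 *	local SYM  local ASYM  partner SYM  partner ASYM  result
 *	    1          x            1            x        TX+RX pause
 *	    0          1            1            1        TX pause only
 *	    1          1            0            1        RX pause only
 *	    otherwise                                     no pause
 */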
   9711 
   9712 /*
   9713  * wm_serdes_tick:
   9714  *
   9715  *	Check the link on serdes devices.
   9716  */
   9717 static void
   9718 wm_serdes_tick(struct wm_softc *sc)
   9719 {
   9720 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9721 	struct mii_data *mii = &sc->sc_mii;
   9722 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9723 	uint32_t reg;
   9724 
   9725 	KASSERT(WM_CORE_LOCKED(sc));
   9726 
   9727 	mii->mii_media_status = IFM_AVALID;
   9728 	mii->mii_media_active = IFM_ETHER;
   9729 
   9730 	/* Check PCS */
   9731 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9732 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9733 		mii->mii_media_status |= IFM_ACTIVE;
   9734 		sc->sc_tbi_linkup = 1;
   9735 		sc->sc_tbi_serdes_ticks = 0;
   9736 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9737 		if ((reg & PCS_LSTS_FDX) != 0)
   9738 			mii->mii_media_active |= IFM_FDX;
   9739 		else
   9740 			mii->mii_media_active |= IFM_HDX;
   9741 	} else {
		mii->mii_media_active |= IFM_NONE;
		sc->sc_tbi_linkup = 0;
		/* If the timer expired, retry autonegotiation */
   9745 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9746 		    && (++sc->sc_tbi_serdes_ticks
   9747 			>= sc->sc_tbi_serdes_anegticks)) {
   9748 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9749 			sc->sc_tbi_serdes_ticks = 0;
   9750 			/* XXX */
   9751 			wm_serdes_mediachange(ifp);
   9752 		}
   9753 	}
   9754 
   9755 	wm_tbi_serdes_set_linkled(sc);
   9756 }
   9757 
   9758 /* SFP related */
   9759 
   9760 static int
   9761 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9762 {
   9763 	uint32_t i2ccmd;
   9764 	int i;
   9765 
   9766 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9767 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9768 
   9769 	/* Poll the ready bit */
   9770 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9771 		delay(50);
   9772 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9773 		if (i2ccmd & I2CCMD_READY)
   9774 			break;
   9775 	}
   9776 	if ((i2ccmd & I2CCMD_READY) == 0)
   9777 		return -1;
   9778 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9779 		return -1;
   9780 
   9781 	*data = i2ccmd & 0x00ff;
   9782 
   9783 	return 0;
   9784 }
   9785 
   9786 static uint32_t
   9787 wm_sfp_get_media_type(struct wm_softc *sc)
   9788 {
   9789 	uint32_t ctrl_ext;
   9790 	uint8_t val = 0;
   9791 	int timeout = 3;
   9792 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9793 	int rv = -1;
   9794 
   9795 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9796 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9797 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9798 	CSR_WRITE_FLUSH(sc);
   9799 
   9800 	/* Read SFP module data */
   9801 	while (timeout) {
   9802 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9803 		if (rv == 0)
   9804 			break;
   9805 		delay(100*1000); /* XXX too big */
   9806 		timeout--;
   9807 	}
   9808 	if (rv != 0)
   9809 		goto out;
   9810 	switch (val) {
   9811 	case SFF_SFP_ID_SFF:
   9812 		aprint_normal_dev(sc->sc_dev,
   9813 		    "Module/Connector soldered to board\n");
   9814 		break;
   9815 	case SFF_SFP_ID_SFP:
   9816 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9817 		break;
   9818 	case SFF_SFP_ID_UNKNOWN:
   9819 		goto out;
   9820 	default:
   9821 		break;
   9822 	}
   9823 
   9824 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9825 	if (rv != 0) {
   9826 		goto out;
   9827 	}
   9828 
   9829 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9830 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   9832 		sc->sc_flags |= WM_F_SGMII;
   9833 		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   9835 		sc->sc_flags |= WM_F_SGMII;
   9836 		mediatype = WM_MEDIATYPE_SERDES;
   9837 	}
   9838 
   9839 out:
   9840 	/* Restore I2C interface setting */
   9841 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9842 
   9843 	return mediatype;
   9844 }
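
/*
 * The identifier and compliance bytes read above correspond to the
 * SFP module's SFF-8472 A0h page: the ID byte distinguishes a module
 * soldered to the board from a pluggable SFP, and the Ethernet
 * compliance flags select between 1000BASE-SX/LX (SerDes),
 * 1000BASE-T (SGMII to an external PHY) and 100BASE-FX.  For example,
 * a typical 1000BASE-SX optic reports the SFP ID plus the 1000SX
 * flag and is therefore handled as WM_MEDIATYPE_SERDES.
 */
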
   9845 /*
   9846  * NVM related.
   9847  * Microwire, SPI (w/wo EERD) and Flash.
   9848  */
   9849 
   9850 /* Both spi and uwire */
   9851 
   9852 /*
   9853  * wm_eeprom_sendbits:
   9854  *
   9855  *	Send a series of bits to the EEPROM.
   9856  */
   9857 static void
   9858 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9859 {
   9860 	uint32_t reg;
   9861 	int x;
   9862 
   9863 	reg = CSR_READ(sc, WMREG_EECD);
   9864 
   9865 	for (x = nbits; x > 0; x--) {
   9866 		if (bits & (1U << (x - 1)))
   9867 			reg |= EECD_DI;
   9868 		else
   9869 			reg &= ~EECD_DI;
   9870 		CSR_WRITE(sc, WMREG_EECD, reg);
   9871 		CSR_WRITE_FLUSH(sc);
   9872 		delay(2);
   9873 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9874 		CSR_WRITE_FLUSH(sc);
   9875 		delay(2);
   9876 		CSR_WRITE(sc, WMREG_EECD, reg);
   9877 		CSR_WRITE_FLUSH(sc);
   9878 		delay(2);
   9879 	}
   9880 }
   9881 
   9882 /*
   9883  * wm_eeprom_recvbits:
   9884  *
   9885  *	Receive a series of bits from the EEPROM.
   9886  */
   9887 static void
   9888 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9889 {
   9890 	uint32_t reg, val;
   9891 	int x;
   9892 
   9893 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9894 
   9895 	val = 0;
   9896 	for (x = nbits; x > 0; x--) {
   9897 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9898 		CSR_WRITE_FLUSH(sc);
   9899 		delay(2);
   9900 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9901 			val |= (1U << (x - 1));
   9902 		CSR_WRITE(sc, WMREG_EECD, reg);
   9903 		CSR_WRITE_FLUSH(sc);
   9904 		delay(2);
   9905 	}
   9906 	*valp = val;
   9907 }
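
/*
 * Timing sketch for one bit cell as generated by the two helpers
 * above: data is driven on DI (or sampled from DO) while SK is low,
 * then SK is raised and lowered again, with a 2us settle after every
 * register write, so each bit takes roughly 6us:
 *
 *	DI  ---< bit >----------------
 *	SK  _____/~~~~~~\_____________
 *	      2us   2us    2us
 */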
   9908 
   9909 /* Microwire */
   9910 
   9911 /*
   9912  * wm_nvm_read_uwire:
   9913  *
   9914  *	Read a word from the EEPROM using the MicroWire protocol.
   9915  */
   9916 static int
   9917 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9918 {
   9919 	uint32_t reg, val;
   9920 	int i;
   9921 
   9922 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   9923 		device_xname(sc->sc_dev), __func__));
   9924 
   9925 	for (i = 0; i < wordcnt; i++) {
   9926 		/* Clear SK and DI. */
   9927 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9928 		CSR_WRITE(sc, WMREG_EECD, reg);
   9929 
   9930 		/*
   9931 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9932 		 * and Xen.
   9933 		 *
   9934 		 * We use this workaround only for 82540 because qemu's
   9935 		 * e1000 act as 82540.
   9936 		 */
   9937 		if (sc->sc_type == WM_T_82540) {
   9938 			reg |= EECD_SK;
   9939 			CSR_WRITE(sc, WMREG_EECD, reg);
   9940 			reg &= ~EECD_SK;
   9941 			CSR_WRITE(sc, WMREG_EECD, reg);
   9942 			CSR_WRITE_FLUSH(sc);
   9943 			delay(2);
   9944 		}
   9945 		/* XXX: end of workaround */
   9946 
   9947 		/* Set CHIP SELECT. */
   9948 		reg |= EECD_CS;
   9949 		CSR_WRITE(sc, WMREG_EECD, reg);
   9950 		CSR_WRITE_FLUSH(sc);
   9951 		delay(2);
   9952 
   9953 		/* Shift in the READ command. */
   9954 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9955 
   9956 		/* Shift in address. */
   9957 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9958 
   9959 		/* Shift out the data. */
   9960 		wm_eeprom_recvbits(sc, &val, 16);
   9961 		data[i] = val & 0xffff;
   9962 
   9963 		/* Clear CHIP SELECT. */
   9964 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9965 		CSR_WRITE(sc, WMREG_EECD, reg);
   9966 		CSR_WRITE_FLUSH(sc);
   9967 		delay(2);
   9968 	}
   9969 
   9970 	return 0;
   9971 }
   9972 
   9973 /* SPI */
   9974 
   9975 /*
   9976  * Set SPI and FLASH related information from the EECD register.
   9977  * For 82541 and 82547, the word size is taken from EEPROM.
   9978  */
   9979 static int
   9980 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9981 {
   9982 	int size;
   9983 	uint32_t reg;
   9984 	uint16_t data;
   9985 
   9986 	reg = CSR_READ(sc, WMREG_EECD);
   9987 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9988 
   9989 	/* Read the size of NVM from EECD by default */
   9990 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9991 	switch (sc->sc_type) {
   9992 	case WM_T_82541:
   9993 	case WM_T_82541_2:
   9994 	case WM_T_82547:
   9995 	case WM_T_82547_2:
   9996 		/* Set dummy value to access EEPROM */
   9997 		sc->sc_nvm_wordsize = 64;
   9998 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9999 		reg = data;
   10000 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10001 		if (size == 0)
   10002 			size = 6; /* 64 word size */
   10003 		else
   10004 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10005 		break;
   10006 	case WM_T_80003:
   10007 	case WM_T_82571:
   10008 	case WM_T_82572:
   10009 	case WM_T_82573: /* SPI case */
   10010 	case WM_T_82574: /* SPI case */
   10011 	case WM_T_82583: /* SPI case */
   10012 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10013 		if (size > 14)
   10014 			size = 14;
   10015 		break;
   10016 	case WM_T_82575:
   10017 	case WM_T_82576:
   10018 	case WM_T_82580:
   10019 	case WM_T_I350:
   10020 	case WM_T_I354:
   10021 	case WM_T_I210:
   10022 	case WM_T_I211:
   10023 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10024 		if (size > 15)
   10025 			size = 15;
   10026 		break;
   10027 	default:
		aprint_error_dev(sc->sc_dev,
		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
		return -1;
   10032 	}
   10033 
   10034 	sc->sc_nvm_wordsize = 1 << size;
   10035 
   10036 	return 0;
   10037 }
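
/*
 * Worked example for the size computation above (assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6, as in other e1000-derived drivers):
 * an EECD size field of 2 yields size = 2 + 6 = 8, so sc_nvm_wordsize
 * becomes 1 << 8 = 256 16-bit words, i.e. 512 bytes of NVM.
 */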
   10038 
   10039 /*
   10040  * wm_nvm_ready_spi:
   10041  *
   10042  *	Wait for a SPI EEPROM to be ready for commands.
   10043  */
   10044 static int
   10045 wm_nvm_ready_spi(struct wm_softc *sc)
   10046 {
   10047 	uint32_t val;
   10048 	int usec;
   10049 
   10050 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10051 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10052 		wm_eeprom_recvbits(sc, &val, 8);
   10053 		if ((val & SPI_SR_RDY) == 0)
   10054 			break;
   10055 	}
   10056 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   10058 		return 1;
   10059 	}
   10060 	return 0;
   10061 }
   10062 
   10063 /*
   10064  * wm_nvm_read_spi:
   10065  *
 *	Read a word from the EEPROM using the SPI protocol.
   10067  */
   10068 static int
   10069 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10070 {
   10071 	uint32_t reg, val;
   10072 	int i;
   10073 	uint8_t opc;
   10074 
   10075 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10076 		device_xname(sc->sc_dev), __func__));
   10077 
   10078 	/* Clear SK and CS. */
   10079 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10080 	CSR_WRITE(sc, WMREG_EECD, reg);
   10081 	CSR_WRITE_FLUSH(sc);
   10082 	delay(2);
   10083 
   10084 	if (wm_nvm_ready_spi(sc))
   10085 		return 1;
   10086 
   10087 	/* Toggle CS to flush commands. */
   10088 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10089 	CSR_WRITE_FLUSH(sc);
   10090 	delay(2);
   10091 	CSR_WRITE(sc, WMREG_EECD, reg);
   10092 	CSR_WRITE_FLUSH(sc);
   10093 	delay(2);
   10094 
   10095 	opc = SPI_OPC_READ;
   10096 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10097 		opc |= SPI_OPC_A8;
   10098 
   10099 	wm_eeprom_sendbits(sc, opc, 8);
   10100 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10101 
   10102 	for (i = 0; i < wordcnt; i++) {
   10103 		wm_eeprom_recvbits(sc, &val, 16);
   10104 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10105 	}
   10106 
   10107 	/* Raise CS and clear SK. */
   10108 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10109 	CSR_WRITE(sc, WMREG_EECD, reg);
   10110 	CSR_WRITE_FLUSH(sc);
   10111 	delay(2);
   10112 
   10113 	return 0;
   10114 }
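
/*
 * On the wire, the transaction built above matches a standard SPI
 * EEPROM READ: an 8-bit opcode (with the A8 address bit folded in for
 * small parts), then an 8- or 16-bit byte address (word << 1, since
 * the part is byte addressed), then 16 data bits per word shifted out
 * MSB first.  The byte swap when storing data[i] converts that
 * big-endian byte stream into the word layout the rest of the driver
 * expects.
 */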
   10115 
   10116 /* Using with EERD */
   10117 
   10118 static int
   10119 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10120 {
   10121 	uint32_t attempts = 100000;
   10122 	uint32_t i, reg = 0;
   10123 	int32_t done = -1;
   10124 
   10125 	for (i = 0; i < attempts; i++) {
   10126 		reg = CSR_READ(sc, rw);
   10127 
   10128 		if (reg & EERD_DONE) {
   10129 			done = 0;
   10130 			break;
   10131 		}
   10132 		delay(5);
   10133 	}
   10134 
   10135 	return done;
   10136 }
   10137 
   10138 static int
   10139 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10140     uint16_t *data)
   10141 {
   10142 	int i, eerd = 0;
   10143 	int error = 0;
   10144 
   10145 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10146 		device_xname(sc->sc_dev), __func__));
   10147 
   10148 	for (i = 0; i < wordcnt; i++) {
   10149 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10150 
   10151 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10152 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10153 		if (error != 0)
   10154 			break;
   10155 
   10156 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10157 	}
   10158 
   10159 	return error;
   10160 }
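
/*
 * The EERD register implements a simple request/response protocol:
 * write the word address together with the START bit, poll the DONE
 * bit, then take the result from the upper bits.  A minimal sketch of
 * a single-word read (illustrative only):
 *
 *	CSR_WRITE(sc, WMREG_EERD, (addr << EERD_ADDR_SHIFT) | EERD_START);
 *	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
 *		delay(5);
 *	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 */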
   10161 
   10162 /* Flash */
   10163 
   10164 static int
   10165 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10166 {
   10167 	uint32_t eecd;
   10168 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10169 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10170 	uint8_t sig_byte = 0;
   10171 
   10172 	switch (sc->sc_type) {
   10173 	case WM_T_PCH_SPT:
   10174 		/*
   10175 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10176 		 * sector valid bits from the NVM.
   10177 		 */
   10178 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10179 		if ((*bank == 0) || (*bank == 1)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: no valid NVM bank present\n", __func__);
   10183 			return -1;
   10184 		} else {
   10185 			*bank = *bank - 2;
   10186 			return 0;
   10187 		}
   10188 	case WM_T_ICH8:
   10189 	case WM_T_ICH9:
   10190 		eecd = CSR_READ(sc, WMREG_EECD);
   10191 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10192 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10193 			return 0;
   10194 		}
   10195 		/* FALLTHROUGH */
   10196 	default:
   10197 		/* Default to 0 */
   10198 		*bank = 0;
   10199 
   10200 		/* Check bank 0 */
   10201 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10202 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10203 			*bank = 0;
   10204 			return 0;
   10205 		}
   10206 
   10207 		/* Check bank 1 */
   10208 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10209 		    &sig_byte);
   10210 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10211 			*bank = 1;
   10212 			return 0;
   10213 		}
   10214 	}
   10215 
   10216 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10217 		device_xname(sc->sc_dev)));
   10218 	return -1;
   10219 }
   10220 
   10221 /******************************************************************************
   10222  * This function does initial flash setup so that a new read/write/erase cycle
   10223  * can be started.
   10224  *
   10225  * sc - The pointer to the hw structure
   10226  ****************************************************************************/
   10227 static int32_t
   10228 wm_ich8_cycle_init(struct wm_softc *sc)
   10229 {
   10230 	uint16_t hsfsts;
   10231 	int32_t error = 1;
   10232 	int32_t i     = 0;
   10233 
   10234 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10235 
	/* Check the Flash Descriptor Valid bit in Hw status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
   10240 
	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   10243 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10244 
   10245 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10246 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after hardware
	 * reset, which can then be used as an indication of whether a cycle
	 * is in progress or has been completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the cycle-in-
	 * progress bit so that access to those bits by two threads is
	 * serialized, or some other way to keep two threads from starting
	 * a cycle at the same time.
	 */
   10257 
   10258 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10259 		/*
   10260 		 * There is no cycle running at present, so we can start a
   10261 		 * cycle
   10262 		 */
   10263 
   10264 		/* Begin by setting Flash Cycle Done. */
   10265 		hsfsts |= HSFSTS_DONE;
   10266 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10267 		error = 0;
   10268 	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
   10273 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10274 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10275 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10276 				error = 0;
   10277 				break;
   10278 			}
   10279 			delay(1);
   10280 		}
   10281 		if (error == 0) {
   10282 			/*
			 * The previous cycle ended within the timeout;
			 * now set the Flash Cycle Done bit.
   10285 			 */
   10286 			hsfsts |= HSFSTS_DONE;
   10287 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10288 		}
   10289 	}
   10290 	return error;
   10291 }
   10292 
   10293 /******************************************************************************
   10294  * This function starts a flash cycle and waits for its completion
   10295  *
   10296  * sc - The pointer to the hw structure
   10297  ****************************************************************************/
   10298 static int32_t
   10299 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10300 {
   10301 	uint16_t hsflctl;
   10302 	uint16_t hsfsts;
   10303 	int32_t error = 1;
   10304 	uint32_t i = 0;
   10305 
   10306 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10307 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10308 	hsflctl |= HSFCTL_GO;
   10309 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10310 
   10311 	/* Wait till FDONE bit is set to 1 */
   10312 	do {
   10313 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10314 		if (hsfsts & HSFSTS_DONE)
   10315 			break;
   10316 		delay(1);
   10317 		i++;
   10318 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10320 		error = 0;
   10321 
   10322 	return error;
   10323 }
   10324 
   10325 /******************************************************************************
   10326  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10327  *
   10328  * sc - The pointer to the hw structure
   10329  * index - The index of the byte or word to read.
   10330  * size - Size of data to read, 1=byte 2=word, 4=dword
   10331  * data - Pointer to the word to store the value read.
   10332  *****************************************************************************/
   10333 static int32_t
   10334 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10335     uint32_t size, uint32_t *data)
   10336 {
   10337 	uint16_t hsfsts;
   10338 	uint16_t hsflctl;
   10339 	uint32_t flash_linear_address;
   10340 	uint32_t flash_data = 0;
   10341 	int32_t error = 1;
   10342 	int32_t count = 0;
   10343 
	if (size < 1 || size > 4 || data == NULL ||
   10345 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10346 		return error;
   10347 
   10348 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10349 	    sc->sc_ich8_flash_base;
   10350 
   10351 	do {
   10352 		delay(1);
   10353 		/* Steps */
   10354 		error = wm_ich8_cycle_init(sc);
   10355 		if (error)
   10356 			break;
   10357 
   10358 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The bcount field is size - 1 (0=byte, 1=word, 3=dword) */
   10360 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10361 		    & HSFCTL_BCOUNT_MASK;
   10362 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10363 		if (sc->sc_type == WM_T_PCH_SPT) {
   10364 			/*
			 * In SPT, this register is in the LAN memory space,
			 * not flash.  Therefore, only 32 bit access is
			 * supported.
   10367 			 */
   10368 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10369 			    (uint32_t)hsflctl);
   10370 		} else
   10371 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10372 
   10373 		/*
   10374 		 * Write the last 24 bits of index into Flash Linear address
   10375 		 * field in Flash Address
   10376 		 */
   10377 		/* TODO: TBD maybe check the index against the size of flash */
   10378 
   10379 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10380 
   10381 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10382 
		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the data out of Flash
		 * Data0, least significant byte first.
		 */
   10389 		if (error == 0) {
   10390 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10391 			if (size == 1)
   10392 				*data = (uint8_t)(flash_data & 0x000000FF);
   10393 			else if (size == 2)
   10394 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10395 			else if (size == 4)
   10396 				*data = (uint32_t)flash_data;
   10397 			break;
   10398 		} else {
   10399 			/*
   10400 			 * If we've gotten here, then things are probably
   10401 			 * completely hosed, but if the error condition is
   10402 			 * detected, it won't hurt to give it another try...
   10403 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10404 			 */
   10405 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10406 			if (hsfsts & HSFSTS_ERR) {
   10407 				/* Repeat for some time before giving up. */
   10408 				continue;
   10409 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10410 				break;
   10411 		}
   10412 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10413 
   10414 	return error;
   10415 }
   10416 
   10417 /******************************************************************************
   10418  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10419  *
   10420  * sc - pointer to wm_hw structure
   10421  * index - The index of the byte to read.
   10422  * data - Pointer to a byte to store the value read.
   10423  *****************************************************************************/
   10424 static int32_t
   10425 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10426 {
   10427 	int32_t status;
   10428 	uint32_t word = 0;
   10429 
   10430 	status = wm_read_ich8_data(sc, index, 1, &word);
   10431 	if (status == 0)
   10432 		*data = (uint8_t)word;
   10433 	else
   10434 		*data = 0;
   10435 
   10436 	return status;
   10437 }
   10438 
   10439 /******************************************************************************
   10440  * Reads a word from the NVM using the ICH8 flash access registers.
   10441  *
   10442  * sc - pointer to wm_hw structure
   10443  * index - The starting byte index of the word to read.
   10444  * data - Pointer to a word to store the value read.
   10445  *****************************************************************************/
   10446 static int32_t
   10447 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10448 {
   10449 	int32_t status;
   10450 	uint32_t word = 0;
   10451 
   10452 	status = wm_read_ich8_data(sc, index, 2, &word);
   10453 	if (status == 0)
   10454 		*data = (uint16_t)word;
   10455 	else
   10456 		*data = 0;
   10457 
   10458 	return status;
   10459 }
   10460 
   10461 /******************************************************************************
   10462  * Reads a dword from the NVM using the ICH8 flash access registers.
   10463  *
   10464  * sc - pointer to wm_hw structure
   10465  * index - The starting byte index of the word to read.
   10466  * data - Pointer to a word to store the value read.
   10467  *****************************************************************************/
   10468 static int32_t
   10469 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10470 {
   10471 	int32_t status;
   10472 
   10473 	status = wm_read_ich8_data(sc, index, 4, data);
   10474 	return status;
   10475 }
   10476 
   10477 /******************************************************************************
   10478  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10479  * register.
   10480  *
   10481  * sc - Struct containing variables accessed by shared code
   10482  * offset - offset of word in the EEPROM to read
   10483  * data - word read from the EEPROM
   10484  * words - number of words to read
   10485  *****************************************************************************/
   10486 static int
   10487 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10488 {
   10489 	int32_t  error = 0;
   10490 	uint32_t flash_bank = 0;
   10491 	uint32_t act_offset = 0;
   10492 	uint32_t bank_offset = 0;
   10493 	uint16_t word = 0;
   10494 	uint16_t i = 0;
   10495 
   10496 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10497 		device_xname(sc->sc_dev), __func__));
   10498 
   10499 	/*
   10500 	 * We need to know which is the valid flash bank.  In the event
   10501 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10502 	 * managing flash_bank.  So it cannot be trusted and needs
   10503 	 * to be updated with each read.
   10504 	 */
   10505 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10506 	if (error) {
   10507 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10508 			device_xname(sc->sc_dev)));
   10509 		flash_bank = 0;
   10510 	}
   10511 
   10512 	/*
   10513 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10514 	 * size
   10515 	 */
   10516 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10517 
   10518 	error = wm_get_swfwhw_semaphore(sc);
   10519 	if (error) {
   10520 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10521 		    __func__);
   10522 		return error;
   10523 	}
   10524 
   10525 	for (i = 0; i < words; i++) {
   10526 		/* The NVM part needs a byte offset, hence * 2 */
   10527 		act_offset = bank_offset + ((offset + i) * 2);
   10528 		error = wm_read_ich8_word(sc, act_offset, &word);
   10529 		if (error) {
   10530 			aprint_error_dev(sc->sc_dev,
   10531 			    "%s: failed to read NVM\n", __func__);
   10532 			break;
   10533 		}
   10534 		data[i] = word;
   10535 	}
   10536 
   10537 	wm_put_swfwhw_semaphore(sc);
   10538 	return error;
   10539 }
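
/*
 * Worked example for the offset arithmetic above, using illustrative
 * numbers: with a flash bank size of 0x1000 words and flash_bank == 1,
 * bank_offset is 0x1000 * 2 = 0x2000 bytes, so reading NVM word 0x0a
 * accesses flash byte offset 0x2000 + 0x0a * 2 = 0x2014.
 */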
   10540 
   10541 /******************************************************************************
   10542  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10543  * register.
   10544  *
   10545  * sc - Struct containing variables accessed by shared code
   10546  * offset - offset of word in the EEPROM to read
   10547  * data - word read from the EEPROM
   10548  * words - number of words to read
   10549  *****************************************************************************/
   10550 static int
   10551 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10552 {
   10553 	int32_t  error = 0;
   10554 	uint32_t flash_bank = 0;
   10555 	uint32_t act_offset = 0;
   10556 	uint32_t bank_offset = 0;
   10557 	uint32_t dword = 0;
   10558 	uint16_t i = 0;
   10559 
   10560 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10561 		device_xname(sc->sc_dev), __func__));
   10562 
   10563 	/*
   10564 	 * We need to know which is the valid flash bank.  In the event
   10565 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10566 	 * managing flash_bank.  So it cannot be trusted and needs
   10567 	 * to be updated with each read.
   10568 	 */
   10569 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10570 	if (error) {
   10571 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10572 			device_xname(sc->sc_dev)));
   10573 		flash_bank = 0;
   10574 	}
   10575 
   10576 	/*
   10577 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10578 	 * size
   10579 	 */
   10580 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10581 
   10582 	error = wm_get_swfwhw_semaphore(sc);
   10583 	if (error) {
   10584 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10585 		    __func__);
   10586 		return error;
   10587 	}
   10588 
   10589 	for (i = 0; i < words; i++) {
   10590 		/* The NVM part needs a byte offset, hence * 2 */
   10591 		act_offset = bank_offset + ((offset + i) * 2);
   10592 		/* but we must read dword aligned, so mask ... */
   10593 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10594 		if (error) {
   10595 			aprint_error_dev(sc->sc_dev,
   10596 			    "%s: failed to read NVM\n", __func__);
   10597 			break;
   10598 		}
   10599 		/* ... and pick out low or high word */
   10600 		if ((act_offset & 0x2) == 0)
   10601 			data[i] = (uint16_t)(dword & 0xFFFF);
   10602 		else
   10603 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10604 	}
   10605 
   10606 	wm_put_swfwhw_semaphore(sc);
   10607 	return error;
   10608 }
   10609 
   10610 /* iNVM */
   10611 
   10612 static int
   10613 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10614 {
	int32_t rv = -1;
   10616 	uint32_t invm_dword;
   10617 	uint16_t i;
   10618 	uint8_t record_type, word_address;
   10619 
   10620 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10621 		device_xname(sc->sc_dev), __func__));
   10622 
   10623 	for (i = 0; i < INVM_SIZE; i++) {
   10624 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10625 		/* Get record type */
   10626 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10627 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10628 			break;
   10629 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10630 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10631 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10632 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10633 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10634 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10635 			if (word_address == address) {
   10636 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10637 				rv = 0;
   10638 				break;
   10639 			}
   10640 		}
   10641 	}
   10642 
   10643 	return rv;
   10644 }
   10645 
   10646 static int
   10647 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10648 {
   10649 	int rv = 0;
   10650 	int i;
   10651 
   10652 	for (i = 0; i < words; i++) {
   10653 		switch (offset + i) {
   10654 		case NVM_OFF_MACADDR:
   10655 		case NVM_OFF_MACADDR1:
   10656 		case NVM_OFF_MACADDR2:
   10657 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10658 			if (rv != 0) {
   10659 				data[i] = 0xffff;
   10660 				rv = -1;
   10661 			}
   10662 			break;
   10663 		case NVM_OFF_CFG2:
   10664 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10665 			if (rv != 0) {
   10666 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10667 				rv = 0;
   10668 			}
   10669 			break;
   10670 		case NVM_OFF_CFG4:
   10671 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10672 			if (rv != 0) {
   10673 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10674 				rv = 0;
   10675 			}
   10676 			break;
   10677 		case NVM_OFF_LED_1_CFG:
   10678 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10679 			if (rv != 0) {
   10680 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10681 				rv = 0;
   10682 			}
   10683 			break;
   10684 		case NVM_OFF_LED_0_2_CFG:
   10685 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10686 			if (rv != 0) {
   10687 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10688 				rv = 0;
   10689 			}
   10690 			break;
   10691 		case NVM_OFF_ID_LED_SETTINGS:
   10692 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10693 			if (rv != 0) {
   10694 				*data = ID_LED_RESERVED_FFFF;
   10695 				rv = 0;
   10696 			}
   10697 			break;
   10698 		default:
   10699 			DPRINTF(WM_DEBUG_NVM,
   10700 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10701 			*data = NVM_RESERVED_WORD;
   10702 			break;
   10703 		}
   10704 	}
   10705 
   10706 	return rv;
   10707 }
   10708 
   10709 /* Lock, detecting NVM type, validate checksum, version and read */
   10710 
   10711 /*
   10712  * wm_nvm_acquire:
   10713  *
   10714  *	Perform the EEPROM handshake required on some chips.
   10715  */
   10716 static int
   10717 wm_nvm_acquire(struct wm_softc *sc)
   10718 {
   10719 	uint32_t reg;
   10720 	int x;
   10721 	int ret = 0;
   10722 
   10723 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10724 		device_xname(sc->sc_dev), __func__));
   10725 
	/* Always succeeds */
   10727 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10728 		return 0;
   10729 
   10730 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10731 		ret = wm_get_swfwhw_semaphore(sc);
   10732 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10733 		/* This will also do wm_get_swsm_semaphore() if needed */
   10734 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10735 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10736 		ret = wm_get_swsm_semaphore(sc);
   10737 	}
   10738 
   10739 	if (ret) {
   10740 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10741 			__func__);
   10742 		return 1;
   10743 	}
   10744 
   10745 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10746 		reg = CSR_READ(sc, WMREG_EECD);
   10747 
   10748 		/* Request EEPROM access. */
   10749 		reg |= EECD_EE_REQ;
   10750 		CSR_WRITE(sc, WMREG_EECD, reg);
   10751 
   10752 		/* ..and wait for it to be granted. */
   10753 		for (x = 0; x < 1000; x++) {
   10754 			reg = CSR_READ(sc, WMREG_EECD);
   10755 			if (reg & EECD_EE_GNT)
   10756 				break;
   10757 			delay(5);
   10758 		}
   10759 		if ((reg & EECD_EE_GNT) == 0) {
   10760 			aprint_error_dev(sc->sc_dev,
   10761 			    "could not acquire EEPROM GNT\n");
   10762 			reg &= ~EECD_EE_REQ;
   10763 			CSR_WRITE(sc, WMREG_EECD, reg);
   10764 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10765 				wm_put_swfwhw_semaphore(sc);
   10766 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10767 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10768 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10769 				wm_put_swsm_semaphore(sc);
   10770 			return 1;
   10771 		}
   10772 	}
   10773 
   10774 	return 0;
   10775 }
   10776 
   10777 /*
   10778  * wm_nvm_release:
   10779  *
   10780  *	Release the EEPROM mutex.
   10781  */
   10782 static void
   10783 wm_nvm_release(struct wm_softc *sc)
   10784 {
   10785 	uint32_t reg;
   10786 
   10787 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10788 		device_xname(sc->sc_dev), __func__));
   10789 
	/* Always succeeds */
   10791 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10792 		return;
   10793 
   10794 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10795 		reg = CSR_READ(sc, WMREG_EECD);
   10796 		reg &= ~EECD_EE_REQ;
   10797 		CSR_WRITE(sc, WMREG_EECD, reg);
   10798 	}
   10799 
   10800 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10801 		wm_put_swfwhw_semaphore(sc);
   10802 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10803 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10804 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10805 		wm_put_swsm_semaphore(sc);
   10806 }
   10807 
   10808 static int
   10809 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10810 {
   10811 	uint32_t eecd = 0;
   10812 
   10813 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10814 	    || sc->sc_type == WM_T_82583) {
   10815 		eecd = CSR_READ(sc, WMREG_EECD);
   10816 
   10817 		/* Isolate bits 15 & 16 */
   10818 		eecd = ((eecd >> 15) & 0x03);
   10819 
   10820 		/* If both bits are set, device is Flash type */
   10821 		if (eecd == 0x03)
   10822 			return 0;
   10823 	}
   10824 	return 1;
   10825 }
   10826 
   10827 static int
   10828 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10829 {
   10830 	uint32_t eec;
   10831 
   10832 	eec = CSR_READ(sc, WMREG_EEC);
   10833 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10834 		return 1;
   10835 
   10836 	return 0;
   10837 }
   10838 
   10839 /*
   10840  * wm_nvm_validate_checksum
   10841  *
   10842  * The checksum is defined as the sum of the first 64 (16 bit) words.
   10843  */
   10844 static int
   10845 wm_nvm_validate_checksum(struct wm_softc *sc)
   10846 {
   10847 	uint16_t checksum;
   10848 	uint16_t eeprom_data;
   10849 #ifdef WM_DEBUG
   10850 	uint16_t csum_wordaddr, valid_checksum;
   10851 #endif
   10852 	int i;
   10853 
   10854 	checksum = 0;
   10855 
   10856 	/* Don't check for I211 */
   10857 	if (sc->sc_type == WM_T_I211)
   10858 		return 0;
   10859 
   10860 #ifdef WM_DEBUG
   10861 	if (sc->sc_type == WM_T_PCH_LPT) {
   10862 		csum_wordaddr = NVM_OFF_COMPAT;
   10863 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10864 	} else {
   10865 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10866 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10867 	}
   10868 
   10869 	/* Dump EEPROM image for debug */
   10870 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10871 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10872 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10873 		/* XXX PCH_SPT? */
   10874 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10875 		if ((eeprom_data & valid_checksum) == 0) {
   10876 			DPRINTF(WM_DEBUG_NVM,
   10877 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10878 				device_xname(sc->sc_dev), eeprom_data,
   10879 				    valid_checksum));
   10880 		}
   10881 	}
   10882 
   10883 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10884 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10885 		for (i = 0; i < NVM_SIZE; i++) {
   10886 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10887 				printf("XXXX ");
   10888 			else
   10889 				printf("%04hx ", eeprom_data);
   10890 			if (i % 8 == 7)
   10891 				printf("\n");
   10892 		}
   10893 	}
   10894 
   10895 #endif /* WM_DEBUG */
   10896 
   10897 	for (i = 0; i < NVM_SIZE; i++) {
   10898 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10899 			return 1;
   10900 		checksum += eeprom_data;
   10901 	}
   10902 
   10903 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10904 #ifdef WM_DEBUG
   10905 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10906 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10907 #endif
   10908 	}
   10909 
   10910 	return 0;
   10911 }
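
/*
 * Checksum example: the image is laid out so that the 16-bit sum of
 * words 0x00 through 0x3f equals NVM_CHECKSUM (0xbaba on these
 * devices); whoever programs the NVM simply sets the checksum word
 * (conventionally the last of the 64) to NVM_CHECKSUM minus the sum
 * of the preceding words, modulo 2^16.
 */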
   10912 
   10913 static void
   10914 wm_nvm_version_invm(struct wm_softc *sc)
   10915 {
   10916 	uint32_t dword;
   10917 
   10918 	/*
   10919 	 * Linux's code to decode version is very strange, so we don't
   10920 	 * obey that algorithm and just use word 61 as the document.
   10921 	 * Perhaps it's not perfect though...
   10922 	 *
   10923 	 * Example:
   10924 	 *
   10925 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10926 	 */
   10927 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10928 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10929 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10930 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10931 }
   10932 
   10933 static void
   10934 wm_nvm_version(struct wm_softc *sc)
   10935 {
   10936 	uint16_t major, minor, build, patch;
   10937 	uint16_t uid0, uid1;
   10938 	uint16_t nvm_data;
   10939 	uint16_t off;
   10940 	bool check_version = false;
   10941 	bool check_optionrom = false;
   10942 	bool have_build = false;
   10943 
   10944 	/*
   10945 	 * Version format:
   10946 	 *
   10947 	 * XYYZ
   10948 	 * X0YZ
   10949 	 * X0YY
   10950 	 *
   10951 	 * Example:
   10952 	 *
   10953 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10954 	 *	82571	0x50a6	5.10.6?
   10955 	 *	82572	0x506a	5.6.10?
   10956 	 *	82572EI	0x5069	5.6.9?
   10957 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10958 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10960 	 */
   10961 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10962 	switch (sc->sc_type) {
   10963 	case WM_T_82571:
   10964 	case WM_T_82572:
   10965 	case WM_T_82574:
   10966 	case WM_T_82583:
   10967 		check_version = true;
   10968 		check_optionrom = true;
   10969 		have_build = true;
   10970 		break;
   10971 	case WM_T_82575:
   10972 	case WM_T_82576:
   10973 	case WM_T_82580:
   10974 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10975 			check_version = true;
   10976 		break;
   10977 	case WM_T_I211:
   10978 		wm_nvm_version_invm(sc);
   10979 		goto printver;
   10980 	case WM_T_I210:
   10981 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10982 			wm_nvm_version_invm(sc);
   10983 			goto printver;
   10984 		}
   10985 		/* FALLTHROUGH */
   10986 	case WM_T_I350:
   10987 	case WM_T_I354:
   10988 		check_version = true;
   10989 		check_optionrom = true;
   10990 		break;
   10991 	default:
   10992 		return;
   10993 	}
   10994 	if (check_version) {
   10995 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10996 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10997 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10998 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10999 			build = nvm_data & NVM_BUILD_MASK;
   11000 			have_build = true;
   11001 		} else
   11002 			minor = nvm_data & 0x00ff;
   11003 
   11004 		/* Decimal */
   11005 		minor = (minor / 16) * 10 + (minor % 16);
   11006 		sc->sc_nvm_ver_major = major;
   11007 		sc->sc_nvm_ver_minor = minor;
   11008 
   11009 printver:
   11010 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11011 		    sc->sc_nvm_ver_minor);
   11012 		if (have_build) {
   11013 			sc->sc_nvm_ver_build = build;
   11014 			aprint_verbose(".%d", build);
   11015 		}
   11016 	}
   11017 	if (check_optionrom) {
   11018 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11019 		/* Option ROM Version */
   11020 		if ((off != 0x0000) && (off != 0xffff)) {
   11021 			off += NVM_COMBO_VER_OFF;
   11022 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11023 			wm_nvm_read(sc, off, 1, &uid0);
   11024 			if ((uid0 != 0) && (uid0 != 0xffff)
   11025 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11026 				/* 16bits */
   11027 				major = uid0 >> 8;
   11028 				build = (uid0 << 8) | (uid1 >> 8);
   11029 				patch = uid1 & 0x00ff;
   11030 				aprint_verbose(", option ROM Version %d.%d.%d",
   11031 				    major, build, patch);
   11032 			}
   11033 		}
   11034 	}
   11035 
	/* Re-read UID1; the option ROM check above may have clobbered it */
	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11038 }
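
/*
 * Decode example for the version logic above: nvm_data 0x50a2 in the
 * XYYZ format splits into major 5, raw minor 0x0a and build 2; the
 * nibble-to-decimal step then turns the raw minor into
 * (0x0a / 16) * 10 + (0x0a % 16) = 10, giving "version 5.10.2".
 */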
   11039 
   11040 /*
   11041  * wm_nvm_read:
   11042  *
   11043  *	Read data from the serial EEPROM.
   11044  */
   11045 static int
   11046 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11047 {
   11048 	int rv;
   11049 
   11050 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11051 		device_xname(sc->sc_dev), __func__));
   11052 
   11053 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11054 		return 1;
   11055 
   11056 	if (wm_nvm_acquire(sc))
   11057 		return 1;
   11058 
   11059 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11060 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11061 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11062 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11063 	else if (sc->sc_type == WM_T_PCH_SPT)
   11064 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11065 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11066 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11067 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11068 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11069 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11070 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11071 	else
   11072 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11073 
   11074 	wm_nvm_release(sc);
   11075 	return rv;
   11076 }
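
/*
 * Typical usage sketch (illustrative only): fetching the three
 * Ethernet address words from the default offset, independent of
 * which NVM access method the dispatcher above selects:
 *
 *	uint16_t myea[3];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) == 0) {
 *		enaddr[0] = myea[0] & 0xff;
 *		enaddr[1] = myea[0] >> 8;
 *		enaddr[2] = myea[1] & 0xff;
 *		...
 *	}
 */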
   11077 
   11078 /*
   11079  * Hardware semaphores.
 * Very complex...
   11081  */
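
/*
 * Rough layering, as used by the routines below: SWSM.SMBI is the
 * basic hardware semaphore, SWSM.SWESMBI arbitrates between software
 * and firmware, SW_FW_SYNC divides individual resources (PHY, NVM,
 * ...) between the two once the semaphore is held, and EXTCNFCTR's
 * MDIO software ownership bit serves a similar purpose on ICH/PCH
 * parts.
 */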
   11082 
   11083 static int
   11084 wm_get_swsm_semaphore(struct wm_softc *sc)
   11085 {
   11086 	int32_t timeout;
   11087 	uint32_t swsm;
   11088 
   11089 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11090 		/* Get the SW semaphore. */
   11091 		timeout = sc->sc_nvm_wordsize + 1;
   11092 		while (timeout) {
   11093 			swsm = CSR_READ(sc, WMREG_SWSM);
   11094 
   11095 			if ((swsm & SWSM_SMBI) == 0)
   11096 				break;
   11097 
   11098 			delay(50);
   11099 			timeout--;
   11100 		}
   11101 
   11102 		if (timeout == 0) {
   11103 			aprint_error_dev(sc->sc_dev,
   11104 			    "could not acquire SWSM SMBI\n");
   11105 			return 1;
   11106 		}
   11107 	}
   11108 
   11109 	/* Get the FW semaphore. */
   11110 	timeout = sc->sc_nvm_wordsize + 1;
   11111 	while (timeout) {
   11112 		swsm = CSR_READ(sc, WMREG_SWSM);
   11113 		swsm |= SWSM_SWESMBI;
   11114 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11115 		/* If we managed to set the bit we got the semaphore. */
   11116 		swsm = CSR_READ(sc, WMREG_SWSM);
   11117 		if (swsm & SWSM_SWESMBI)
   11118 			break;
   11119 
   11120 		delay(50);
   11121 		timeout--;
   11122 	}
   11123 
   11124 	if (timeout == 0) {
   11125 		aprint_error_dev(sc->sc_dev,
   11126 		    "could not acquire SWSM SWESMBI\n");
   11127 		/* Release semaphores */
   11128 		wm_put_swsm_semaphore(sc);
   11129 		return 1;
   11130 	}
   11131 	return 0;
   11132 }
   11133 
   11134 /*
   11135  * Put hardware semaphore.
   11136  * Same as e1000_put_hw_semaphore_generic()
   11137  */
   11138 static void
   11139 wm_put_swsm_semaphore(struct wm_softc *sc)
   11140 {
   11141 	uint32_t swsm;
   11142 
   11143 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11144 		device_xname(sc->sc_dev), __func__));
   11145 
   11146 	swsm = CSR_READ(sc, WMREG_SWSM);
   11147 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11148 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11149 }
   11150 
   11151 /*
   11152  * Get SW/FW semaphore.
   11153  * Same as e1000_acquire_swfw_sync_82575().
   11154  */
   11155 static int
   11156 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11157 {
   11158 	uint32_t swfw_sync;
   11159 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11160 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
   11162 
   11163 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11164 		device_xname(sc->sc_dev), __func__));
   11165 
   11166 	for (timeout = 0; timeout < 200; timeout++) {
   11167 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11168 			if (wm_get_swsm_semaphore(sc)) {
   11169 				aprint_error_dev(sc->sc_dev,
   11170 				    "%s: failed to get semaphore\n",
   11171 				    __func__);
   11172 				return 1;
   11173 			}
   11174 		}
   11175 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11176 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11177 			swfw_sync |= swmask;
   11178 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11179 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11180 				wm_put_swsm_semaphore(sc);
   11181 			return 0;
   11182 		}
   11183 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11184 			wm_put_swsm_semaphore(sc);
   11185 		delay(5000);
   11186 	}
   11187 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11188 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11189 	return 1;
   11190 }
   11191 
   11192 static void
   11193 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11194 {
   11195 	uint32_t swfw_sync;
   11196 
   11197 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11198 		device_xname(sc->sc_dev), __func__));
   11199 
   11200 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11201 		while (wm_get_swsm_semaphore(sc) != 0)
   11202 			continue;
   11203 	}
   11204 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11205 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11206 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11207 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11208 		wm_put_swsm_semaphore(sc);
   11209 }
   11210 
   11211 static int
   11212 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11213 {
   11214 	uint32_t ext_ctrl;
	int timeout;
   11216 
   11217 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11218 		device_xname(sc->sc_dev), __func__));
   11219 
   11220 	for (timeout = 0; timeout < 200; timeout++) {
   11221 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11222 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11223 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11224 
   11225 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11226 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11227 			return 0;
   11228 		delay(5000);
   11229 	}
   11230 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11231 	    device_xname(sc->sc_dev), ext_ctrl);
   11232 	return 1;
   11233 }
   11234 
   11235 static void
   11236 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11237 {
   11238 	uint32_t ext_ctrl;
   11239 
   11240 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11241 		device_xname(sc->sc_dev), __func__));
   11242 
   11243 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11244 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11245 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11246 }
   11247 
   11248 static int
   11249 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11250 {
   11251 	int i = 0;
   11252 	uint32_t reg;
   11253 
   11254 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11255 		device_xname(sc->sc_dev), __func__));
   11256 
   11257 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11258 	do {
   11259 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11260 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11261 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11262 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11263 			break;
   11264 		delay(2*1000);
   11265 		i++;
   11266 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11267 
   11268 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11269 		wm_put_hw_semaphore_82573(sc);
   11270 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11271 		    device_xname(sc->sc_dev));
   11272 		return -1;
   11273 	}
   11274 
   11275 	return 0;
   11276 }
   11277 
   11278 static void
   11279 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11280 {
   11281 	uint32_t reg;
   11282 
   11283 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11284 		device_xname(sc->sc_dev), __func__));
   11285 
   11286 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11287 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11288 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11289 }
   11290 
   11291 /*
   11292  * Management mode and power management related subroutines.
   11293  * BMC, AMT, suspend/resume and EEE.
   11294  */
   11295 
   11296 #ifdef WM_WOL
   11297 static int
   11298 wm_check_mng_mode(struct wm_softc *sc)
   11299 {
   11300 	int rv;
   11301 
   11302 	switch (sc->sc_type) {
   11303 	case WM_T_ICH8:
   11304 	case WM_T_ICH9:
   11305 	case WM_T_ICH10:
   11306 	case WM_T_PCH:
   11307 	case WM_T_PCH2:
   11308 	case WM_T_PCH_LPT:
   11309 	case WM_T_PCH_SPT:
   11310 		rv = wm_check_mng_mode_ich8lan(sc);
   11311 		break;
   11312 	case WM_T_82574:
   11313 	case WM_T_82583:
   11314 		rv = wm_check_mng_mode_82574(sc);
   11315 		break;
   11316 	case WM_T_82571:
   11317 	case WM_T_82572:
   11318 	case WM_T_82573:
   11319 	case WM_T_80003:
   11320 		rv = wm_check_mng_mode_generic(sc);
   11321 		break;
   11322 	default:
		/* Nothing to do */
   11324 		rv = 0;
   11325 		break;
   11326 	}
   11327 
   11328 	return rv;
   11329 }
   11330 
   11331 static int
   11332 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11333 {
   11334 	uint32_t fwsm;
   11335 
   11336 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11337 
   11338 	if (((fwsm & FWSM_FW_VALID) != 0)
   11339 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11340 		return 1;
   11341 
   11342 	return 0;
   11343 }
   11344 
   11345 static int
   11346 wm_check_mng_mode_82574(struct wm_softc *sc)
   11347 {
   11348 	uint16_t data;
   11349 
   11350 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11351 
   11352 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11353 		return 1;
   11354 
   11355 	return 0;
   11356 }
   11357 
   11358 static int
   11359 wm_check_mng_mode_generic(struct wm_softc *sc)
   11360 {
   11361 	uint32_t fwsm;
   11362 
   11363 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11364 
   11365 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11366 		return 1;
   11367 
   11368 	return 0;
   11369 }
   11370 #endif /* WM_WOL */
   11371 
   11372 static int
   11373 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11374 {
   11375 	uint32_t manc, fwsm, factps;
   11376 
   11377 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11378 		return 0;
   11379 
   11380 	manc = CSR_READ(sc, WMREG_MANC);
   11381 
   11382 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11383 		device_xname(sc->sc_dev), manc));
   11384 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11385 		return 0;
   11386 
   11387 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11388 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11389 		factps = CSR_READ(sc, WMREG_FACTPS);
   11390 		if (((factps & FACTPS_MNGCG) == 0)
   11391 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11392 			return 1;
   11393 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11394 		uint16_t data;
   11395 
   11396 		factps = CSR_READ(sc, WMREG_FACTPS);
   11397 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11398 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11399 			device_xname(sc->sc_dev), factps, data));
   11400 		if (((factps & FACTPS_MNGCG) == 0)
   11401 		    && ((data & NVM_CFG2_MNGM_MASK)
   11402 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11403 			return 1;
   11404 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11405 	    && ((manc & MANC_ASF_EN) == 0))
   11406 		return 1;
   11407 
   11408 	return 0;
   11409 }
   11410 
   11411 static bool
   11412 wm_phy_resetisblocked(struct wm_softc *sc)
   11413 {
   11414 	bool blocked = false;
   11415 	uint32_t reg;
   11416 	int i = 0;
   11417 
   11418 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11419 		device_xname(sc->sc_dev), __func__));
   11420 
   11421 	switch (sc->sc_type) {
   11422 	case WM_T_ICH8:
   11423 	case WM_T_ICH9:
   11424 	case WM_T_ICH10:
   11425 	case WM_T_PCH:
   11426 	case WM_T_PCH2:
   11427 	case WM_T_PCH_LPT:
   11428 	case WM_T_PCH_SPT:
   11429 		do {
   11430 			reg = CSR_READ(sc, WMREG_FWSM);
   11431 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11432 				blocked = true;
   11433 				delay(10*1000);
   11434 				continue;
   11435 			}
   11436 			blocked = false;
   11437 		} while (blocked && (i++ < 10));
   11438 		return blocked;
   11440 	case WM_T_82571:
   11441 	case WM_T_82572:
   11442 	case WM_T_82573:
   11443 	case WM_T_82574:
   11444 	case WM_T_82583:
   11445 	case WM_T_80003:
   11446 		reg = CSR_READ(sc, WMREG_MANC);
   11447 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return true;
		else
			return false;
   11452 	default:
   11453 		/* no problem */
   11454 		break;
   11455 	}
   11456 
   11457 	return false;
   11458 }
   11459 
   11460 static void
   11461 wm_get_hw_control(struct wm_softc *sc)
   11462 {
   11463 	uint32_t reg;
   11464 
   11465 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11466 		device_xname(sc->sc_dev), __func__));
   11467 
   11468 	switch (sc->sc_type) {
   11469 	case WM_T_82573:
   11470 		reg = CSR_READ(sc, WMREG_SWSM);
   11471 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11472 		break;
   11473 	case WM_T_82571:
   11474 	case WM_T_82572:
   11475 	case WM_T_82574:
   11476 	case WM_T_82583:
   11477 	case WM_T_80003:
   11478 	case WM_T_ICH8:
   11479 	case WM_T_ICH9:
   11480 	case WM_T_ICH10:
   11481 	case WM_T_PCH:
   11482 	case WM_T_PCH2:
   11483 	case WM_T_PCH_LPT:
   11484 	case WM_T_PCH_SPT:
   11485 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11486 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11487 		break;
   11488 	default:
   11489 		break;
   11490 	}
   11491 }
   11492 
   11493 static void
   11494 wm_release_hw_control(struct wm_softc *sc)
   11495 {
   11496 	uint32_t reg;
   11497 
   11498 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11499 		device_xname(sc->sc_dev), __func__));
   11500 
   11501 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11502 		return;
   11503 
   11504 	if (sc->sc_type == WM_T_82573) {
   11505 		reg = CSR_READ(sc, WMREG_SWSM);
   11507 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   11508 	} else {
   11509 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11510 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11511 	}
   11512 }
   11513 
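         /*
          * On PCH2 (82579) and newer, EXTCNFCTR_GATE_PHY_CFG keeps the
          * hardware from running its automatic PHY configuration;
          * wm_smbustopci() gates it around the LANPHYPC toggle and ungates
          * it again afterwards on unmanaged parts.
          */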
   11514 static void
   11515 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11516 {
   11517 	uint32_t reg;
   11518 
   11519 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11520 		device_xname(sc->sc_dev), __func__));
   11521 
   11522 	if (sc->sc_type < WM_T_PCH2)
   11523 		return;
   11524 
   11525 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11526 
   11527 	if (gate)
   11528 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11529 	else
   11530 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11531 
   11532 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11533 }
   11534 
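         /*
          * Move the PHY from SMBus back onto the MAC's MDIO interface.  If
          * no valid firmware owns the PHY and resets are not blocked,
          * toggling LANPHYPC power-cycles the PHY so that it re-straps;
          * PCH_LPT and newer parts additionally have CTRL_EXT_FORCE_SMBUS
          * set around the toggle.
          */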
   11535 static void
   11536 wm_smbustopci(struct wm_softc *sc)
   11537 {
   11538 	uint32_t fwsm, reg;
   11539 
   11540 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11541 		device_xname(sc->sc_dev), __func__));
   11542 
   11543 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11544 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11545 
   11546 	/* Acquire semaphore */
   11547 	wm_get_swfwhw_semaphore(sc);
   11548 
   11549 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11550 	if (((fwsm & FWSM_FW_VALID) == 0)
   11551 	    && (wm_phy_resetisblocked(sc) == false)) {
   11552 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11553 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11554 			reg |= CTRL_EXT_FORCE_SMBUS;
   11555 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11556 			CSR_WRITE_FLUSH(sc);
   11557 			delay(50*1000);
   11558 		}
   11559 
   11560 		/* Toggle LANPHYPC */
   11561 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11562 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11563 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11564 		CSR_WRITE_FLUSH(sc);
   11565 		delay(1000);
   11566 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11567 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11568 		CSR_WRITE_FLUSH(sc);
   11569 		delay(50*1000);
   11570 
   11571 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11572 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11573 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11574 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11575 		}
   11576 	}
   11577 
   11578 	/* Release semaphore */
   11579 	wm_put_swfwhw_semaphore(sc);
   11580 
   11581 	/*
   11582 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11583 	 */
   11584 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11585 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11586 }
   11587 
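         /*
          * While the management firmware is active, keep it and the host
          * cooperating: hardware ARP interception is disabled and, from the
          * 82571 on, packets for the management ports (MANC2H_PORT_623/624)
          * are also forwarded to the host.  wm_release_manageability()
          * below reverts this when the driver lets go of the device.
          */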
   11588 static void
   11589 wm_init_manageability(struct wm_softc *sc)
   11590 {
   11591 
   11592 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11593 		device_xname(sc->sc_dev), __func__));
   11594 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11595 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11596 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11597 
   11598 		/* Disable hardware interception of ARP */
   11599 		manc &= ~MANC_ARP_EN;
   11600 
   11601 		/* Enable receiving management packets to the host */
   11602 		if (sc->sc_type >= WM_T_82571) {
   11603 			manc |= MANC_EN_MNG2HOST;
   11604 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11605 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11606 		}
   11607 
   11608 		CSR_WRITE(sc, WMREG_MANC, manc);
   11609 	}
   11610 }
   11611 
   11612 static void
   11613 wm_release_manageability(struct wm_softc *sc)
   11614 {
   11615 
   11616 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11617 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11618 
   11619 		manc |= MANC_ARP_EN;
   11620 		if (sc->sc_type >= WM_T_82571)
   11621 			manc &= ~MANC_EN_MNG2HOST;
   11622 
   11623 		CSR_WRITE(sc, WMREG_MANC, manc);
   11624 	}
   11625 }
   11626 
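         /*
          * Collect the wakeup/manageability properties of the chip in two
          * steps: step 0 derives HAS_AMT, ARC_SUBSYS_VALID and
          * ASF_FIRMWARE_PRES from the chip type (and FWSM), step 1 sets
          * HAS_MANAGE from the pass-through check above.
          */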
   11627 static void
   11628 wm_get_wakeup(struct wm_softc *sc)
   11629 {
   11630 
   11631 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11632 	switch (sc->sc_type) {
   11633 	case WM_T_82573:
   11634 	case WM_T_82583:
   11635 		sc->sc_flags |= WM_F_HAS_AMT;
   11636 		/* FALLTHROUGH */
   11637 	case WM_T_80003:
   11638 	case WM_T_82541:
   11639 	case WM_T_82547:
   11640 	case WM_T_82571:
   11641 	case WM_T_82572:
   11642 	case WM_T_82574:
   11643 	case WM_T_82575:
   11644 	case WM_T_82576:
   11645 	case WM_T_82580:
   11646 	case WM_T_I350:
   11647 	case WM_T_I354:
   11648 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   11649 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11650 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11651 		break;
   11652 	case WM_T_ICH8:
   11653 	case WM_T_ICH9:
   11654 	case WM_T_ICH10:
   11655 	case WM_T_PCH:
   11656 	case WM_T_PCH2:
   11657 	case WM_T_PCH_LPT:
   11658 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   11659 		sc->sc_flags |= WM_F_HAS_AMT;
   11660 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11661 		break;
   11662 	default:
   11663 		break;
   11664 	}
   11665 
   11666 	/* 1: HAS_MANAGE */
   11667 	if (wm_enable_mng_pass_thru(sc) != 0)
   11668 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11669 
   11670 #ifdef WM_DEBUG
   11671 	printf("\n");
   11672 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11673 		printf("HAS_AMT,");
   11674 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11675 		printf("ARC_SUBSYS_VALID,");
   11676 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11677 		printf("ASF_FIRMWARE_PRES,");
   11678 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11679 		printf("HAS_MANAGE,");
   11680 	printf("\n");
   11681 #endif
   11682 	/*
   11683 	 * Note that the WOL flag is set only after the EEPROM-related
   11684 	 * reset has been done.
   11685 	 */
   11686 }
   11687 
   11688 #ifdef WM_WOL
   11689 /* WOL in the newer chipset interfaces (pchlan) */
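         /*
          * Still a stub.  A full implementation (cf. other e1000-derived
          * drivers) would copy the MAC's receive address and multicast
          * tables into the PHY's wakeup register space and arm the wakeup
          * logic through PHY accesses, since on pchlan the MAC side may be
          * powered down while the PHY handles the wake event.
          */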
   11690 static void
   11691 wm_enable_phy_wakeup(struct wm_softc *sc)
   11692 {
   11693 #if 0
   11694 	uint16_t preg;
   11695 
   11696 	/* Copy MAC RARs to PHY RARs */
   11697 
   11698 	/* Copy MAC MTA to PHY MTA */
   11699 
   11700 	/* Configure PHY Rx Control register */
   11701 
   11702 	/* Enable PHY wakeup in MAC register */
   11703 
   11704 	/* Configure and enable PHY wakeup in PHY registers */
   11705 
   11706 	/* Activate PHY wakeup */
   11707 
   11708 	/* XXX */
   11709 #endif
   11710 }
   11711 
   11712 /* Power down workaround on D3 */
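         /*
          * Try (at most twice) to put the IGP3 voltage regulator into
          * shutdown: disable the gigabit link, run the gig downshift
          * workaround on ICH8, write the VR power-down mode and read it
          * back; if the readback does not stick the first time, reset the
          * PHY and repeat once.
          */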
   11713 static void
   11714 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11715 {
   11716 	uint32_t reg;
   11717 	int i;
   11718 
   11719 	for (i = 0; i < 2; i++) {
   11720 		/* Disable link */
   11721 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11722 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11723 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11724 
   11725 		/*
   11726 		 * Call gig speed drop workaround on Gig disable before
   11727 		 * accessing any PHY registers
   11728 		 */
   11729 		if (sc->sc_type == WM_T_ICH8)
   11730 			wm_gig_downshift_workaround_ich8lan(sc);
   11731 
   11732 		/* Write VR power-down enable */
   11733 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11734 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11735 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11736 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11737 
   11738 		/* Read it back and test */
   11739 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11740 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11741 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11742 			break;
   11743 
   11744 		/* Issue PHY reset and repeat at most one more time */
   11745 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11746 	}
   11747 }
   11748 
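         /*
          * Arm the adapter for wakeup: advertise APM wakeup, apply the
          * ICH/PCH gig-disable and PHY power-down workarounds, program the
          * wakeup filters (magic packet, WUFC_MAG) and finally set
          * PME_STS/PME_EN in the PCI power management capability so that a
          * matching packet asserts PME#.
          */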
   11749 static void
   11750 wm_enable_wakeup(struct wm_softc *sc)
   11751 {
   11752 	uint32_t reg, pmreg;
   11753 	pcireg_t pmode;
   11754 
   11755 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11756 		&pmreg, NULL) == 0)
   11757 		return;
   11758 
   11759 	/* Advertise the wakeup capability */
   11760 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11761 	    | CTRL_SWDPIN(3));
   11762 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11763 
   11764 	/* ICH workaround */
   11765 	switch (sc->sc_type) {
   11766 	case WM_T_ICH8:
   11767 	case WM_T_ICH9:
   11768 	case WM_T_ICH10:
   11769 	case WM_T_PCH:
   11770 	case WM_T_PCH2:
   11771 	case WM_T_PCH_LPT:
   11772 	case WM_T_PCH_SPT:
   11773 		/* Disable gig during WOL */
   11774 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11775 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11776 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11777 		if (sc->sc_type == WM_T_PCH)
   11778 			wm_gmii_reset(sc);
   11779 
   11780 		/* Power down workaround */
   11781 		if (sc->sc_phytype == WMPHY_82577) {
   11782 			struct mii_softc *child;
   11783 
   11784 			/* Assume that the PHY is copper */
   11785 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11786 			if (child->mii_mpd_rev <= 2)
   11787 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11788 				    (768 << 5) | 25, 0x0444); /* magic num */
   11789 		}
   11790 		break;
   11791 	default:
   11792 		break;
   11793 	}
   11794 
   11795 	/* Keep the laser running on fiber adapters */
   11796 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11797 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11798 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11799 		reg |= CTRL_EXT_SWDPIN(3);
   11800 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11801 	}
   11802 
   11803 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11804 #if 0	/* for the multicast packet */
   11805 	reg |= WUFC_MC;
   11806 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11807 #endif
   11808 
   11809 	if (sc->sc_type == WM_T_PCH) {
   11810 		wm_enable_phy_wakeup(sc);
   11811 	} else {
   11812 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11813 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11814 	}
   11815 
   11816 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11817 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11818 		|| (sc->sc_type == WM_T_PCH2))
   11819 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11820 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11821 
   11822 	/* Request PME */
   11823 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11824 #if 0
   11825 	/* Disable WOL */
   11826 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11827 #else
   11828 	/* For WOL */
   11829 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11830 #endif
   11831 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11832 }
   11833 #endif /* WM_WOL */
   11834 
   11835 /* LPLU */
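         /*
          * LPLU (Low Power Link Up) lets the PHY bring the link up at the
          * lowest speed first to save power.  The helpers below disable it
          * for the D0(-active) state, where link speed is preferred over
          * power saving.
          */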
   11836 
   11837 static void
   11838 wm_lplu_d0_disable(struct wm_softc *sc)
   11839 {
   11840 	uint32_t reg;
   11841 
   11842 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11843 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   11844 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11845 }
   11846 
   11847 static void
   11848 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   11849 {
   11850 	uint32_t reg;
   11851 
   11852 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   11853 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   11854 	reg |= HV_OEM_BITS_ANEGNOW;
   11855 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   11856 }
   11857 
   11858 /* EEE */
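         /*
          * EEE (Energy Efficient Ethernet, IEEE 802.3az): IPCNFG advertises
          * EEE for 100Mbps/1Gbps autonegotiation, and EEER enables entering
          * LPI (low power idle) on the transmit and receive paths plus LPI
          * flow control.
          */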
   11859 
   11860 static void
   11861 wm_set_eee_i350(struct wm_softc *sc)
   11862 {
   11863 	uint32_t ipcnfg, eeer;
   11864 
   11865 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11866 	eeer = CSR_READ(sc, WMREG_EEER);
   11867 
   11868 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11869 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11870 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11871 		    | EEER_LPI_FC);
   11872 	} else {
   11873 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11874 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11875 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11876 		    | EEER_LPI_FC);
   11877 	}
   11878 
   11879 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11880 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11881 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11882 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11883 }
   11884 
   11885 /*
   11886  * Workarounds (mainly PHY related).
   11887  * Basically, PHY's workarounds are in the PHY drivers.
   11888  */
   11889 
   11890 /* Work-around for 82566 Kumeran PCS lock loss */
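         /*
          * Disabled for now.  The (#if 0'ed) logic: with an active 1Gbps
          * link, read IGP3_KMRN_DIAG twice and, while PCS lock loss is
          * reported, reset the PHY up to 10 times; if the lock never comes
          * back, give up and disable GbE negotiation entirely.
          */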
   11891 static void
   11892 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11893 {
   11894 #if 0
   11895 	int miistatus, active, i;
   11896 	int reg;
   11897 
   11898 	miistatus = sc->sc_mii.mii_media_status;
   11899 
   11900 	/* If the link is not up, do nothing */
   11901 	if ((miistatus & IFM_ACTIVE) == 0)
   11902 		return;
   11903 
   11904 	active = sc->sc_mii.mii_media_active;
   11905 
   11906 	/* Nothing to do if the link is other than 1Gbps */
   11907 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11908 		return;
   11909 
   11910 	for (i = 0; i < 10; i++) {
   11911 		/* read twice */
   11912 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11913 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11914 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11915 			goto out;	/* GOOD! */
   11916 
   11917 		/* Reset the PHY */
   11918 		wm_gmii_reset(sc);
   11919 		delay(5*1000);
   11920 	}
   11921 
   11922 	/* Disable GigE link negotiation */
   11923 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11924 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11925 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11926 
   11927 	/*
   11928 	 * Call gig speed drop workaround on Gig disable before accessing
   11929 	 * any PHY registers.
   11930 	 */
   11931 	wm_gig_downshift_workaround_ich8lan(sc);
   11932 
   11933 out:
   11934 	return;
   11935 #endif
   11936 }
   11937 
   11938 /* WOL from S5 stops working */
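         /*
          * For IGP3 PHYs the workaround sets and immediately clears the
          * Kumeran near-end loopback bit, which presumably nudges the PCS
          * so the gigabit link downshifts cleanly; cf. the gig downshift
          * routine in other e1000-derived drivers.
          */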
   11939 static void
   11940 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11941 {
   11942 	uint16_t kmrn_reg;
   11943 
   11944 	/* Only for igp3 */
   11945 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11946 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11947 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11948 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11949 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11950 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11951 	}
   11952 }
   11953 
   11954 /*
   11955  * Workaround for pch's PHYs
   11956  * XXX should be moved to new PHY driver?
   11957  */
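         /*
          * For the original PCH: force slow MDIO on the 82577, apply the
          * 82578 rev.<3 register writes, re-select PHY page 0 and configure
          * the K1 workaround as if a 1Gbps link were up; several steps for
          * specific PHY revisions are still placeholders below.
          */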
   11958 static void
   11959 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   11960 {
   11961 
   11962 	KASSERT(sc->sc_type == WM_T_PCH);
   11963 
   11964 	if (sc->sc_phytype == WMPHY_82577)
   11965 		wm_set_mdio_slow_mode_hv(sc);
   11966 
   11967 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   11968 
   11969 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   11970 
   11971 	/* 82578 */
   11972 	if (sc->sc_phytype == WMPHY_82578) {
   11973 		/* PCH rev. < 3 */
   11974 		if (sc->sc_rev < 3) {
   11975 			/* XXX 6 bit shift? Why? Is it page2? */
   11976 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   11977 			    0x66c0);
   11978 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   11979 			    0xffff);
   11980 		}
   11981 
   11982 		/* XXX phy rev. < 2 */
   11983 	}
   11984 
   11985 	/* Select page 0 */
   11986 
   11987 	/* XXX acquire semaphore */
   11988 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   11989 	/* XXX release semaphore */
   11990 
   11991 	/*
   11992 	 * Configure the K1 Si workaround during phy reset assuming there is
   11993 	 * link so that it disables K1 if link is in 1Gbps.
   11994 	 */
   11995 	wm_k1_gig_workaround_hv(sc, 1);
   11996 }
   11997 
   11998 static void
   11999 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12000 {
   12001 
   12002 	KASSERT(sc->sc_type == WM_T_PCH2);
   12003 
   12004 	wm_set_mdio_slow_mode_hv(sc);
   12005 }
   12006 
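         /*
          * K1 is a low power idle state of the PCH PHY that must not be
          * used while the link runs at 1Gbps; k1_enable is therefore forced
          * off whenever a link is assumed up (see the caller's comment),
          * and the IGP3_KMRN_DIAG writes additionally address link stalls
          * in either state.
          */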
   12007 static void
   12008 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12009 {
   12010 	int k1_enable = sc->sc_nvm_k1_enabled;
   12011 
   12012 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12013 		device_xname(sc->sc_dev), __func__));
   12014 
   12015 	/* XXX acquire semaphore */
   12016 
   12017 	if (link) {
   12018 		k1_enable = 0;
   12019 
   12020 		/* Link stall fix for link up */
   12021 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12022 	} else {
   12023 		/* Link stall fix for link down */
   12024 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12025 	}
   12026 
   12027 	wm_configure_k1_ich8lan(sc, k1_enable);
   12028 
   12029 	/* XXX release semaphore */
   12030 }
   12031 
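         /*
          * Slow down the MDIO access timing via HV_KMRN_MODE_CTRL; some
          * PCH-family PHY revisions apparently need this before their
          * registers can be accessed reliably.
          */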
   12032 static void
   12033 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12034 {
   12035 	uint32_t reg;
   12036 
   12037 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12038 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12039 	    reg | HV_KMRN_MDIO_SLOW);
   12040 }
   12041 
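         /*
          * Commit a K1 setting: the new value is written over the Kumeran
          * interface, then CTRL_FRCSPD together with CTRL_EXT_SPD_BYPS
          * briefly forces the MAC speed so the change takes effect, after
          * which the original CTRL/CTRL_EXT values are restored.
          */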
   12042 static void
   12043 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12044 {
   12045 	uint32_t ctrl, ctrl_ext, tmp;
   12046 	uint16_t kmrn_reg;
   12047 
   12048 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12049 
   12050 	if (k1_enable)
   12051 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12052 	else
   12053 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12054 
   12055 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12056 
   12057 	delay(20);
   12058 
   12059 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12060 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12061 
   12062 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12063 	tmp |= CTRL_FRCSPD;
   12064 
   12065 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12066 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12067 	CSR_WRITE_FLUSH(sc);
   12068 	delay(20);
   12069 
   12070 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12071 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12072 	CSR_WRITE_FLUSH(sc);
   12073 	delay(20);
   12074 }
   12075 
   12076 /* Special case - the 82575 needs this manual init sequence ... */
   12077 static void
   12078 wm_reset_init_script_82575(struct wm_softc *sc)
   12079 {
   12080 	/*
   12081 	 * Remark: this is untested code - we have no board without an EEPROM.
   12082 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   12083 	 */
   12084 
   12085 	/* SerDes configuration via SERDESCTRL */
   12086 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12087 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12088 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12089 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12090 
   12091 	/* CCM configuration via CCMCTL register */
   12092 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12093 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12094 
   12095 	/* PCIe lanes configuration */
   12096 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12097 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12098 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12099 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12100 
   12101 	/* PCIe PLL Configuration */
   12102 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12103 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12104 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12105 }
   12106 
   12107 static void
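         /*
          * In SGMII mode the 82580's MDIO routing (external PHY and shared
          * MDIO pin selection) lives in MDICNFG; as the function name
          * suggests, a reset loses those bits, so they are rebuilt here
          * from the per-port CFG3 word in the NVM.
          */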
   12108 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12109 {
   12110 	uint32_t reg;
   12111 	uint16_t nvmword;
   12112 	int rv;
   12113 
   12114 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12115 		return;
   12116 
   12117 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12118 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12119 	if (rv != 0) {
   12120 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12121 		    __func__);
   12122 		return;
   12123 	}
   12124 
   12125 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12126 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12127 		reg |= MDICNFG_DEST;
   12128 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12129 		reg |= MDICNFG_COM_MDIO;
   12130 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12131 }
   12132 
   12133 /*
   12134  * I210 Errata 25 and I211 Errata 10
   12135  * Slow System Clock.
   12136  */
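         /*
          * If the PHY PLL failed to lock (GS40G_PHY_PLL_FREQ reads back as
          * unconfigured), reset the internal PHY and bounce the function
          * through D3hot/D0 with a patched iNVM autoload word, retrying up
          * to WM_MAX_PLL_TRIES times.  WUC and MDICNFG are saved and
          * restored around the whole dance.
          */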
   12137 static void
   12138 wm_pll_workaround_i210(struct wm_softc *sc)
   12139 {
   12140 	uint32_t mdicnfg, wuc;
   12141 	uint32_t reg;
   12142 	pcireg_t pcireg;
   12143 	uint32_t pmreg;
   12144 	uint16_t nvmword, tmp_nvmword;
   12145 	int phyval;
   12146 	bool wa_done = false;
   12147 	int i;
   12148 
   12149 	/* Save WUC and MDICNFG registers */
   12150 	wuc = CSR_READ(sc, WMREG_WUC);
   12151 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12152 
   12153 	reg = mdicnfg & ~MDICNFG_DEST;
   12154 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12155 
   12156 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12157 		nvmword = INVM_DEFAULT_AL;
   12158 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12159 
   12160 	/* Get Power Management cap offset */
   12161 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12162 		&pmreg, NULL) == 0) {
         		/* No PM capability: undo the MDICNFG change and bail */
         		CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12163 		return;
         	}
   12164 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12165 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12166 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12167 
   12168 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12169 			break; /* OK */
   12170 		}
   12171 
   12172 		wa_done = true;
   12173 		/* Directly reset the internal PHY */
   12174 		reg = CSR_READ(sc, WMREG_CTRL);
   12175 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12176 
   12177 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12178 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12179 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12180 
   12181 		CSR_WRITE(sc, WMREG_WUC, 0);
   12182 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12183 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12184 
   12185 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12186 		    pmreg + PCI_PMCSR);
   12187 		pcireg |= PCI_PMCSR_STATE_D3;
   12188 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12189 		    pmreg + PCI_PMCSR, pcireg);
   12190 		delay(1000);
   12191 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12192 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12193 		    pmreg + PCI_PMCSR, pcireg);
   12194 
   12195 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12196 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12197 
   12198 		/* Restore WUC register */
   12199 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12200 	}
   12201 
   12202 	/* Restore MDICNFG setting */
   12203 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12204 	if (wa_done)
   12205 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12206 }
   12207