if_wm.c revision 1.398
      1 /*	$NetBSD: if_wm.c,v 1.398 2016/05/18 06:55:51 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue
      78  *	- EEE (Energy-Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.398 2016/05/18 06:55:51 knakahara Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 
    107 #include <sys/rndsource.h>
    108 
    109 #include <net/if.h>
    110 #include <net/if_dl.h>
    111 #include <net/if_media.h>
    112 #include <net/if_ether.h>
    113 
    114 #include <net/bpf.h>
    115 
    116 #include <netinet/in.h>			/* XXX for struct ip */
    117 #include <netinet/in_systm.h>		/* XXX for struct ip */
    118 #include <netinet/ip.h>			/* XXX for struct ip */
    119 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    120 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    121 
    122 #include <sys/bus.h>
    123 #include <sys/intr.h>
    124 #include <machine/endian.h>
    125 
    126 #include <dev/mii/mii.h>
    127 #include <dev/mii/miivar.h>
    128 #include <dev/mii/miidevs.h>
    129 #include <dev/mii/mii_bitbang.h>
    130 #include <dev/mii/ikphyreg.h>
    131 #include <dev/mii/igphyreg.h>
    132 #include <dev/mii/igphyvar.h>
    133 #include <dev/mii/inbmphyreg.h>
    134 
    135 #include <dev/pci/pcireg.h>
    136 #include <dev/pci/pcivar.h>
    137 #include <dev/pci/pcidevs.h>
    138 
    139 #include <dev/pci/if_wmreg.h>
    140 #include <dev/pci/if_wmvar.h>
    141 
    142 #ifdef WM_DEBUG
    143 #define	WM_DEBUG_LINK		0x01
    144 #define	WM_DEBUG_TX		0x02
    145 #define	WM_DEBUG_RX		0x04
    146 #define	WM_DEBUG_GMII		0x08
    147 #define	WM_DEBUG_MANAGE		0x10
    148 #define	WM_DEBUG_NVM		0x20
    149 #define	WM_DEBUG_INIT		0x40
    150 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    151     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT;
    152 
    153 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    154 #else
    155 #define	DPRINTF(x, y)	/* nothing */
    156 #endif /* WM_DEBUG */
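         /*
          * Usage sketch (illustrative only): the first argument selects a
          * debug class, the second is a parenthesized printf argument list,
          * e.g.
          *
          *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(dev)));
          *
          * With WM_DEBUG undefined the macro compiles away to nothing.
          */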
    157 
    158 #ifdef NET_MPSAFE
    159 #define WM_MPSAFE	1
    160 #endif
    161 
    162 /*
     163  * The maximum number of interrupt vectors this device driver uses.
    164  */
    165 #define WM_MAX_NTXINTR		16
    166 #define WM_MAX_NRXINTR		16
    167 #define WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)
    168 
    169 /*
    170  * Transmit descriptor list size.  Due to errata, we can only have
    171  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    172  * on >= 82544.  We tell the upper layers that they can queue a lot
    173  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    174  * of them at a time.
    175  *
    176  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    177  * chains containing many small mbufs have been observed in zero-copy
    178  * situations with jumbo frames.
    179  */
    180 #define	WM_NTXSEGS		256
    181 #define	WM_IFQUEUELEN		256
    182 #define	WM_TXQUEUELEN_MAX	64
    183 #define	WM_TXQUEUELEN_MAX_82547	16
    184 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    185 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    186 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    187 #define	WM_NTXDESC_82542	256
    188 #define	WM_NTXDESC_82544	4096
    189 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    190 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    191 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    192 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    193 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
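         /*
          * Both the descriptor count and the job count must be powers of
          * two so the "add one and mask" idiom above wraps the ring index.
          * A minimal sketch, assuming WM_NTXDESC(txq) == 4096:
          *
          *	WM_NEXTTX(txq, 4094) == 4095
          *	WM_NEXTTX(txq, 4095) == 0
          */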
    194 
    195 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    196 
    197 /*
     198  * Receive descriptor list size.  We have one Rx buffer for each
     199  * normal-sized packet; a full-sized jumbo packet consumes 5 Rx
     200  * buffers.  We allocate 256 receive descriptors, each with a 2k
    201  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    202  */
    203 #define	WM_NRXDESC		256
    204 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    205 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    206 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    207 
    208 typedef union txdescs {
    209 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    210 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    211 } txdescs_t;
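         /*
          * Note that the union reserves room for the larger 82544-style
          * ring in both descriptor formats; the amount actually mapped for
          * DMA is WM_TXDESCS_SIZE(txq), i.e. txq_ndesc * txq_descsize.
          */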
    212 
    213 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
     214 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
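         /*
          * These yield the byte offset of descriptor "x" within the control
          * data area.  Illustrative only: with the 16-byte legacy
          * wiseman_rxdesc_t, WM_CDRXOFF(3) == 48.
          */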
    215 
    216 /*
    217  * Software state for transmit jobs.
    218  */
    219 struct wm_txsoft {
    220 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    221 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    222 	int txs_firstdesc;		/* first descriptor in packet */
    223 	int txs_lastdesc;		/* last descriptor in packet */
    224 	int txs_ndesc;			/* # of descriptors used */
    225 };
    226 
    227 /*
    228  * Software state for receive buffers.  Each descriptor gets a
    229  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    230  * more than one buffer, we chain them together.
    231  */
    232 struct wm_rxsoft {
    233 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    234 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    235 };
    236 
    237 #define WM_LINKUP_TIMEOUT	50
    238 
    239 static uint16_t swfwphysem[] = {
    240 	SWFW_PHY0_SM,
    241 	SWFW_PHY1_SM,
    242 	SWFW_PHY2_SM,
    243 	SWFW_PHY3_SM
    244 };
    245 
    246 static const uint32_t wm_82580_rxpbs_table[] = {
    247 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    248 };
    249 
    250 struct wm_softc;
    251 
    252 struct wm_txqueue {
    253 	kmutex_t *txq_lock;		/* lock for tx operations */
    254 
    255 	struct wm_softc *txq_sc;
    256 
     257 	int txq_id;			/* index of this transmit queue */
     258 	int txq_intr_idx;		/* index into the MSI-X table */
    259 
    260 	/* Software state for the transmit descriptors. */
    261 	int txq_num;			/* must be a power of two */
    262 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    263 
    264 	/* TX control data structures. */
    265 	int txq_ndesc;			/* must be a power of two */
     266 	size_t txq_descsize;		/* size of a Tx descriptor */
     267 	txdescs_t *txq_descs_u;
     268 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    269 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     270 	int txq_desc_rseg;		/* real number of control segments */
    271 	size_t txq_descs_size;		/* control data size */
    272 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    273 #define	txq_descs	txq_descs_u->sctxu_txdescs
    274 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    275 
    276 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    277 
    278 	int txq_free;			/* number of free Tx descriptors */
    279 	int txq_next;			/* next ready Tx descriptor */
    280 
    281 	int txq_sfree;			/* number of free Tx jobs */
    282 	int txq_snext;			/* next free Tx job */
    283 	int txq_sdirty;			/* dirty Tx jobs */
    284 
    285 	/* These 4 variables are used only on the 82547. */
    286 	int txq_fifo_size;		/* Tx FIFO size */
    287 	int txq_fifo_head;		/* current head of FIFO */
    288 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    289 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    290 
    291 	/* XXX which event counter is required? */
    292 };
    293 
    294 struct wm_rxqueue {
    295 	kmutex_t *rxq_lock;		/* lock for rx operations */
    296 
    297 	struct wm_softc *rxq_sc;
    298 
     299 	int rxq_id;			/* index of this receive queue */
     300 	int rxq_intr_idx;		/* index into the MSI-X table */
    301 
    302 	/* Software state for the receive descriptors. */
    303 	wiseman_rxdesc_t *rxq_descs;
    304 
    305 	/* RX control data structures. */
    306 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    307 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    308 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     309 	int rxq_desc_rseg;		/* real number of control segments */
    310 	size_t rxq_desc_size;		/* control data size */
    311 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    312 
    313 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    314 
    315 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    316 	int rxq_discard;
    317 	int rxq_len;
    318 	struct mbuf *rxq_head;
    319 	struct mbuf *rxq_tail;
    320 	struct mbuf **rxq_tailp;
    321 
    322 	/* XXX which event counter is required? */
    323 };
    324 
    325 /*
    326  * Software state per device.
    327  */
    328 struct wm_softc {
    329 	device_t sc_dev;		/* generic device information */
    330 	bus_space_tag_t sc_st;		/* bus space tag */
    331 	bus_space_handle_t sc_sh;	/* bus space handle */
    332 	bus_size_t sc_ss;		/* bus space size */
    333 	bus_space_tag_t sc_iot;		/* I/O space tag */
    334 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    335 	bus_size_t sc_ios;		/* I/O space size */
    336 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    337 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    338 	bus_size_t sc_flashs;		/* flash registers space size */
    339 	off_t sc_flashreg_offset;	/*
    340 					 * offset to flash registers from
    341 					 * start of BAR
    342 					 */
    343 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    344 
    345 	struct ethercom sc_ethercom;	/* ethernet common data */
    346 	struct mii_data sc_mii;		/* MII/media information */
    347 
    348 	pci_chipset_tag_t sc_pc;
    349 	pcitag_t sc_pcitag;
    350 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    351 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    352 
    353 	uint16_t sc_pcidevid;		/* PCI device ID */
    354 	wm_chip_type sc_type;		/* MAC type */
    355 	int sc_rev;			/* MAC revision */
    356 	wm_phy_type sc_phytype;		/* PHY type */
     357 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
    358 #define	WM_MEDIATYPE_UNKNOWN		0x00
    359 #define	WM_MEDIATYPE_FIBER		0x01
    360 #define	WM_MEDIATYPE_COPPER		0x02
    361 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    362 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    363 	int sc_flags;			/* flags; see below */
    364 	int sc_if_flags;		/* last if_flags */
    365 	int sc_flowflags;		/* 802.3x flow control flags */
    366 	int sc_align_tweak;
    367 
     368 	void *sc_ihs[WM_MAX_NINTR];	/*
     369 					 * interrupt cookies.
     370 					 * legacy and MSI use sc_ihs[0].
     371 					 */
     372 	pci_intr_handle_t *sc_intrs;	/* legacy and MSI use sc_intrs[0] */
    373 	int sc_nintrs;			/* number of interrupts */
    374 
    375 	int sc_link_intr_idx;		/* index of MSI-X tables */
    376 
    377 	callout_t sc_tick_ch;		/* tick callout */
    378 	bool sc_stopping;
    379 
    380 	int sc_nvm_ver_major;
    381 	int sc_nvm_ver_minor;
    382 	int sc_nvm_ver_build;
    383 	int sc_nvm_addrbits;		/* NVM address bits */
    384 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    385 	int sc_ich8_flash_base;
    386 	int sc_ich8_flash_bank_size;
    387 	int sc_nvm_k1_enabled;
    388 
    389 	int sc_ntxqueues;
    390 	struct wm_txqueue *sc_txq;
    391 
    392 	int sc_nrxqueues;
    393 	struct wm_rxqueue *sc_rxq;
    394 
    395 #ifdef WM_EVENT_COUNTERS
    396 	/* Event counters. */
    397 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
    398 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
    399 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
    400 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
    401 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
    402 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
    403 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    404 
    405 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
    406 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
    407 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
    408 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
    409 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
    410 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
    411 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
    412 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
    413 
    414 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
     415 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
    416 
    417 	struct evcnt sc_ev_tu;		/* Tx underrun */
    418 
    419 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    420 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    421 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    422 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    423 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    424 #endif /* WM_EVENT_COUNTERS */
    425 
     426 	/* This variable is used only on the 82547. */
    427 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    428 
    429 	uint32_t sc_ctrl;		/* prototype CTRL register */
    430 #if 0
    431 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    432 #endif
    433 	uint32_t sc_icr;		/* prototype interrupt bits */
    434 	uint32_t sc_itr;		/* prototype intr throttling reg */
    435 	uint32_t sc_tctl;		/* prototype TCTL register */
    436 	uint32_t sc_rctl;		/* prototype RCTL register */
    437 	uint32_t sc_txcw;		/* prototype TXCW register */
    438 	uint32_t sc_tipg;		/* prototype TIPG register */
    439 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    440 	uint32_t sc_pba;		/* prototype PBA register */
    441 
    442 	int sc_tbi_linkup;		/* TBI link status */
    443 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    444 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    445 
    446 	int sc_mchash_type;		/* multicast filter offset */
    447 
    448 	krndsource_t rnd_source;	/* random source */
    449 
    450 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    451 
    452 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    453 };
    454 
    455 #define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
    456 #define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
    457 #define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
    458 #define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
    459 #define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
    460 #define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
    461 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    462 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    463 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    464 
    465 #ifdef WM_MPSAFE
    466 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    467 #else
    468 #define CALLOUT_FLAGS	0
    469 #endif
    470 
    471 #define	WM_RXCHAIN_RESET(rxq)						\
    472 do {									\
    473 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    474 	*(rxq)->rxq_tailp = NULL;					\
    475 	(rxq)->rxq_len = 0;						\
    476 } while (/*CONSTCOND*/0)
    477 
    478 #define	WM_RXCHAIN_LINK(rxq, m)						\
    479 do {									\
    480 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    481 	(rxq)->rxq_tailp = &(m)->m_next;				\
    482 } while (/*CONSTCOND*/0)
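         /*
          * Expected usage in the receive path (an illustrative sketch, not
          * lifted from wm_rxeof()): reset, then hang each 2k buffer of a
          * multi-buffer packet off the tail pointer:
          *
          *	WM_RXCHAIN_RESET(rxq);
          *	WM_RXCHAIN_LINK(rxq, m0);
          *	WM_RXCHAIN_LINK(rxq, m1);
          *
          * afterwards rxq_head points at m0 and m0->m_next at m1.
          */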
    483 
    484 #ifdef WM_EVENT_COUNTERS
    485 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    486 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    487 #else
    488 #define	WM_EVCNT_INCR(ev)	/* nothing */
    489 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    490 #endif
    491 
    492 #define	CSR_READ(sc, reg)						\
    493 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    494 #define	CSR_WRITE(sc, reg, val)						\
    495 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    496 #define	CSR_WRITE_FLUSH(sc)						\
    497 	(void) CSR_READ((sc), WMREG_STATUS)
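         /*
          * CSR_WRITE_FLUSH() forces posted bus writes out to the chip by
          * issuing a harmless read of the STATUS register, which cannot
          * complete until the preceding writes have.  Typical pattern
          * (illustrative only):
          *
          *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
          *	CSR_WRITE_FLUSH(sc);
          *	delay(5000);
          */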
    498 
    499 #define ICH8_FLASH_READ32(sc, reg)					\
    500 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    501 	    (reg) + sc->sc_flashreg_offset)
    502 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    503 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    504 	    (reg) + sc->sc_flashreg_offset, (data))
    505 
    506 #define ICH8_FLASH_READ16(sc, reg)					\
    507 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    508 	    (reg) + sc->sc_flashreg_offset)
    509 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    510 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    511 	    (reg) + sc->sc_flashreg_offset, (data))
    512 
    513 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    514 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    515 
    516 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    517 #define	WM_CDTXADDR_HI(txq, x)						\
    518 	(sizeof(bus_addr_t) == 8 ?					\
    519 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    520 
    521 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    522 #define	WM_CDRXADDR_HI(rxq, x)						\
    523 	(sizeof(bus_addr_t) == 8 ?					\
    524 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
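         /*
          * The _LO/_HI pairs split a ring's 64-bit DMA base address for the
          * xDBAL/xDBAH register pairs; on systems with a 32-bit bus_addr_t
          * the high word is simply 0.
          */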
    525 
    526 /*
    527  * Register read/write functions.
    528  * Other than CSR_{READ|WRITE}().
    529  */
    530 #if 0
    531 static inline uint32_t wm_io_read(struct wm_softc *, int);
    532 #endif
    533 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    534 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    535 	uint32_t, uint32_t);
    536 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    537 
    538 /*
    539  * Descriptor sync/init functions.
    540  */
    541 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    542 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    543 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    544 
    545 /*
    546  * Device driver interface functions and commonly used functions.
    547  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    548  */
    549 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    550 static int	wm_match(device_t, cfdata_t, void *);
    551 static void	wm_attach(device_t, device_t, void *);
    552 static int	wm_detach(device_t, int);
    553 static bool	wm_suspend(device_t, const pmf_qual_t *);
    554 static bool	wm_resume(device_t, const pmf_qual_t *);
    555 static void	wm_watchdog(struct ifnet *);
    556 static void	wm_tick(void *);
    557 static int	wm_ifflags_cb(struct ethercom *);
    558 static int	wm_ioctl(struct ifnet *, u_long, void *);
    559 /* MAC address related */
    560 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    561 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    562 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    563 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    564 static void	wm_set_filter(struct wm_softc *);
    565 /* Reset and init related */
    566 static void	wm_set_vlan(struct wm_softc *);
    567 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    568 static void	wm_get_auto_rd_done(struct wm_softc *);
    569 static void	wm_lan_init_done(struct wm_softc *);
    570 static void	wm_get_cfg_done(struct wm_softc *);
    571 static void	wm_initialize_hardware_bits(struct wm_softc *);
    572 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    573 static void	wm_reset(struct wm_softc *);
    574 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    575 static void	wm_rxdrain(struct wm_rxqueue *);
    576 static void	wm_rss_getkey(uint8_t *);
    577 static void	wm_init_rss(struct wm_softc *);
    578 static void	wm_adjust_qnum(struct wm_softc *, int);
    579 static int	wm_setup_legacy(struct wm_softc *);
    580 static int	wm_setup_msix(struct wm_softc *);
    581 static int	wm_init(struct ifnet *);
    582 static int	wm_init_locked(struct ifnet *);
    583 static void	wm_stop(struct ifnet *, int);
    584 static void	wm_stop_locked(struct ifnet *, int);
    585 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    586 static void	wm_82547_txfifo_stall(void *);
    587 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    588 /* DMA related */
    589 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    590 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    591 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    592 static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
    593 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    594 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    595 static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
    596 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    597 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    598 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    599 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    600 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    601 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    602 static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
    603 static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
    604 static int	wm_alloc_txrx_queues(struct wm_softc *);
    605 static void	wm_free_txrx_queues(struct wm_softc *);
    606 static int	wm_init_txrx_queues(struct wm_softc *);
    607 /* Start */
    608 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    609     uint32_t *, uint8_t *);
    610 static void	wm_start(struct ifnet *);
    611 static void	wm_start_locked(struct ifnet *);
    612 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    613     uint32_t *, uint32_t *, bool *);
    614 static void	wm_nq_start(struct ifnet *);
    615 static void	wm_nq_start_locked(struct ifnet *);
    616 /* Interrupt */
    617 static int	wm_txeof(struct wm_softc *);
    618 static void	wm_rxeof(struct wm_rxqueue *);
    619 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    620 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    621 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    622 static void	wm_linkintr(struct wm_softc *, uint32_t);
    623 static int	wm_intr_legacy(void *);
    624 static int	wm_txintr_msix(void *);
    625 static int	wm_rxintr_msix(void *);
    626 static int	wm_linkintr_msix(void *);
    627 
    628 /*
    629  * Media related.
    630  * GMII, SGMII, TBI, SERDES and SFP.
    631  */
    632 /* Common */
    633 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    634 /* GMII related */
    635 static void	wm_gmii_reset(struct wm_softc *);
    636 static int	wm_get_phy_id_82575(struct wm_softc *);
    637 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    638 static int	wm_gmii_mediachange(struct ifnet *);
    639 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    640 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    641 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    642 static int	wm_gmii_i82543_readreg(device_t, int, int);
    643 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    644 static int	wm_gmii_i82544_readreg(device_t, int, int);
    645 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    646 static int	wm_gmii_i80003_readreg(device_t, int, int);
    647 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    648 static int	wm_gmii_bm_readreg(device_t, int, int);
    649 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    650 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    651 static int	wm_gmii_hv_readreg(device_t, int, int);
    652 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    653 static int	wm_gmii_82580_readreg(device_t, int, int);
    654 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    655 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    656 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    657 static void	wm_gmii_statchg(struct ifnet *);
    658 static int	wm_kmrn_readreg(struct wm_softc *, int);
    659 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    660 /* SGMII */
    661 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    662 static int	wm_sgmii_readreg(device_t, int, int);
    663 static void	wm_sgmii_writereg(device_t, int, int, int);
    664 /* TBI related */
    665 static void	wm_tbi_mediainit(struct wm_softc *);
    666 static int	wm_tbi_mediachange(struct ifnet *);
    667 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    668 static int	wm_check_for_link(struct wm_softc *);
    669 static void	wm_tbi_tick(struct wm_softc *);
    670 /* SERDES related */
    671 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    672 static int	wm_serdes_mediachange(struct ifnet *);
    673 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    674 static void	wm_serdes_tick(struct wm_softc *);
    675 /* SFP related */
    676 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    677 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    678 
    679 /*
    680  * NVM related.
     681  * Microwire, SPI (with or without EERD) and Flash.
    682  */
    683 /* Misc functions */
    684 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    685 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    686 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    687 /* Microwire */
    688 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    689 /* SPI */
    690 static int	wm_nvm_ready_spi(struct wm_softc *);
    691 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     692 /* Used with EERD */
    693 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    694 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    695 /* Flash */
    696 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    697     unsigned int *);
    698 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    699 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    700 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    701 	uint32_t *);
    702 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    703 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    704 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    705 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    706 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    707 /* iNVM */
    708 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    709 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
     710 /* Lock, detect the NVM type, validate the checksum and read */
    711 static int	wm_nvm_acquire(struct wm_softc *);
    712 static void	wm_nvm_release(struct wm_softc *);
    713 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    714 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    715 static int	wm_nvm_validate_checksum(struct wm_softc *);
    716 static void	wm_nvm_version_invm(struct wm_softc *);
    717 static void	wm_nvm_version(struct wm_softc *);
    718 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    719 
    720 /*
    721  * Hardware semaphores.
     722  * Very complex...
    723  */
    724 static int	wm_get_swsm_semaphore(struct wm_softc *);
    725 static void	wm_put_swsm_semaphore(struct wm_softc *);
    726 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    727 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    728 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    729 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    730 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    731 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    732 
    733 /*
    734  * Management mode and power management related subroutines.
    735  * BMC, AMT, suspend/resume and EEE.
    736  */
    737 #ifdef WM_WOL
    738 static int	wm_check_mng_mode(struct wm_softc *);
    739 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    740 static int	wm_check_mng_mode_82574(struct wm_softc *);
    741 static int	wm_check_mng_mode_generic(struct wm_softc *);
    742 #endif
    743 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    744 static bool	wm_phy_resetisblocked(struct wm_softc *);
    745 static void	wm_get_hw_control(struct wm_softc *);
    746 static void	wm_release_hw_control(struct wm_softc *);
    747 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    748 static void	wm_smbustopci(struct wm_softc *);
    749 static void	wm_init_manageability(struct wm_softc *);
    750 static void	wm_release_manageability(struct wm_softc *);
    751 static void	wm_get_wakeup(struct wm_softc *);
    752 #ifdef WM_WOL
    753 static void	wm_enable_phy_wakeup(struct wm_softc *);
    754 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    755 static void	wm_enable_wakeup(struct wm_softc *);
    756 #endif
    757 /* LPLU (Low Power Link Up) */
    758 static void	wm_lplu_d0_disable(struct wm_softc *);
    759 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    760 /* EEE */
    761 static void	wm_set_eee_i350(struct wm_softc *);
    762 
    763 /*
    764  * Workarounds (mainly PHY related).
     765  * Basically, the PHY workarounds are in the PHY drivers.
    766  */
    767 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    768 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    769 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    770 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    771 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    772 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    773 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    774 static void	wm_reset_init_script_82575(struct wm_softc *);
    775 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    776 static void	wm_pll_workaround_i210(struct wm_softc *);
    777 
    778 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    779     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    780 
    781 /*
    782  * Devices supported by this driver.
    783  */
    784 static const struct wm_product {
    785 	pci_vendor_id_t		wmp_vendor;
    786 	pci_product_id_t	wmp_product;
    787 	const char		*wmp_name;
    788 	wm_chip_type		wmp_type;
    789 	uint32_t		wmp_flags;
    790 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    791 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    792 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    793 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    794 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    795 } wm_products[] = {
    796 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    797 	  "Intel i82542 1000BASE-X Ethernet",
    798 	  WM_T_82542_2_1,	WMP_F_FIBER },
    799 
    800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    801 	  "Intel i82543GC 1000BASE-X Ethernet",
    802 	  WM_T_82543,		WMP_F_FIBER },
    803 
    804 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    805 	  "Intel i82543GC 1000BASE-T Ethernet",
    806 	  WM_T_82543,		WMP_F_COPPER },
    807 
    808 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    809 	  "Intel i82544EI 1000BASE-T Ethernet",
    810 	  WM_T_82544,		WMP_F_COPPER },
    811 
    812 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    813 	  "Intel i82544EI 1000BASE-X Ethernet",
    814 	  WM_T_82544,		WMP_F_FIBER },
    815 
    816 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    817 	  "Intel i82544GC 1000BASE-T Ethernet",
    818 	  WM_T_82544,		WMP_F_COPPER },
    819 
    820 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    821 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    822 	  WM_T_82544,		WMP_F_COPPER },
    823 
    824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    825 	  "Intel i82540EM 1000BASE-T Ethernet",
    826 	  WM_T_82540,		WMP_F_COPPER },
    827 
    828 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    829 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    830 	  WM_T_82540,		WMP_F_COPPER },
    831 
    832 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    833 	  "Intel i82540EP 1000BASE-T Ethernet",
    834 	  WM_T_82540,		WMP_F_COPPER },
    835 
    836 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    837 	  "Intel i82540EP 1000BASE-T Ethernet",
    838 	  WM_T_82540,		WMP_F_COPPER },
    839 
    840 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    841 	  "Intel i82540EP 1000BASE-T Ethernet",
    842 	  WM_T_82540,		WMP_F_COPPER },
    843 
    844 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    845 	  "Intel i82545EM 1000BASE-T Ethernet",
    846 	  WM_T_82545,		WMP_F_COPPER },
    847 
    848 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    849 	  "Intel i82545GM 1000BASE-T Ethernet",
    850 	  WM_T_82545_3,		WMP_F_COPPER },
    851 
    852 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    853 	  "Intel i82545GM 1000BASE-X Ethernet",
    854 	  WM_T_82545_3,		WMP_F_FIBER },
    855 
    856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    857 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    858 	  WM_T_82545_3,		WMP_F_SERDES },
    859 
    860 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    861 	  "Intel i82546EB 1000BASE-T Ethernet",
    862 	  WM_T_82546,		WMP_F_COPPER },
    863 
    864 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    865 	  "Intel i82546EB 1000BASE-T Ethernet",
    866 	  WM_T_82546,		WMP_F_COPPER },
    867 
    868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    869 	  "Intel i82545EM 1000BASE-X Ethernet",
    870 	  WM_T_82545,		WMP_F_FIBER },
    871 
    872 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    873 	  "Intel i82546EB 1000BASE-X Ethernet",
    874 	  WM_T_82546,		WMP_F_FIBER },
    875 
    876 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    877 	  "Intel i82546GB 1000BASE-T Ethernet",
    878 	  WM_T_82546_3,		WMP_F_COPPER },
    879 
    880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    881 	  "Intel i82546GB 1000BASE-X Ethernet",
    882 	  WM_T_82546_3,		WMP_F_FIBER },
    883 
    884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    885 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    886 	  WM_T_82546_3,		WMP_F_SERDES },
    887 
    888 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    889 	  "i82546GB quad-port Gigabit Ethernet",
    890 	  WM_T_82546_3,		WMP_F_COPPER },
    891 
    892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    893 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    894 	  WM_T_82546_3,		WMP_F_COPPER },
    895 
    896 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    897 	  "Intel PRO/1000MT (82546GB)",
    898 	  WM_T_82546_3,		WMP_F_COPPER },
    899 
    900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    901 	  "Intel i82541EI 1000BASE-T Ethernet",
    902 	  WM_T_82541,		WMP_F_COPPER },
    903 
    904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    905 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    906 	  WM_T_82541,		WMP_F_COPPER },
    907 
    908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    909 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    910 	  WM_T_82541,		WMP_F_COPPER },
    911 
    912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    913 	  "Intel i82541ER 1000BASE-T Ethernet",
    914 	  WM_T_82541_2,		WMP_F_COPPER },
    915 
    916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    917 	  "Intel i82541GI 1000BASE-T Ethernet",
    918 	  WM_T_82541_2,		WMP_F_COPPER },
    919 
    920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    921 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    922 	  WM_T_82541_2,		WMP_F_COPPER },
    923 
    924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    925 	  "Intel i82541PI 1000BASE-T Ethernet",
    926 	  WM_T_82541_2,		WMP_F_COPPER },
    927 
    928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    929 	  "Intel i82547EI 1000BASE-T Ethernet",
    930 	  WM_T_82547,		WMP_F_COPPER },
    931 
    932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    933 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    934 	  WM_T_82547,		WMP_F_COPPER },
    935 
    936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    937 	  "Intel i82547GI 1000BASE-T Ethernet",
    938 	  WM_T_82547_2,		WMP_F_COPPER },
    939 
    940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    941 	  "Intel PRO/1000 PT (82571EB)",
    942 	  WM_T_82571,		WMP_F_COPPER },
    943 
    944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    945 	  "Intel PRO/1000 PF (82571EB)",
    946 	  WM_T_82571,		WMP_F_FIBER },
    947 
    948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    949 	  "Intel PRO/1000 PB (82571EB)",
    950 	  WM_T_82571,		WMP_F_SERDES },
    951 
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    953 	  "Intel PRO/1000 QT (82571EB)",
    954 	  WM_T_82571,		WMP_F_COPPER },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    957 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    958 	  WM_T_82571,		WMP_F_COPPER, },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
    961 	  "Intel Gigabit PT Quad Port Server ExpressModule",
    962 	  WM_T_82571,		WMP_F_COPPER, },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
    965 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
    966 	  WM_T_82571,		WMP_F_SERDES, },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
    969 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
    970 	  WM_T_82571,		WMP_F_SERDES, },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
    973 	  "Intel 82571EB Quad 1000baseX Ethernet",
    974 	  WM_T_82571,		WMP_F_FIBER, },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    977 	  "Intel i82572EI 1000baseT Ethernet",
    978 	  WM_T_82572,		WMP_F_COPPER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    981 	  "Intel i82572EI 1000baseX Ethernet",
    982 	  WM_T_82572,		WMP_F_FIBER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    985 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    986 	  WM_T_82572,		WMP_F_SERDES },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    989 	  "Intel i82572EI 1000baseT Ethernet",
    990 	  WM_T_82572,		WMP_F_COPPER },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    993 	  "Intel i82573E",
    994 	  WM_T_82573,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    997 	  "Intel i82573E IAMT",
    998 	  WM_T_82573,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1001 	  "Intel i82573L Gigabit Ethernet",
   1002 	  WM_T_82573,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1005 	  "Intel i82574L",
   1006 	  WM_T_82574,		WMP_F_COPPER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1009 	  "Intel i82574L",
   1010 	  WM_T_82574,		WMP_F_COPPER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1013 	  "Intel i82583V",
   1014 	  WM_T_82583,		WMP_F_COPPER },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1017 	  "i80003 dual 1000baseT Ethernet",
   1018 	  WM_T_80003,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1021 	  "i80003 dual 1000baseX Ethernet",
   1022 	  WM_T_80003,		WMP_F_COPPER },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1025 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1026 	  WM_T_80003,		WMP_F_SERDES },
   1027 
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1029 	  "Intel i80003 1000baseT Ethernet",
   1030 	  WM_T_80003,		WMP_F_COPPER },
   1031 
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1033 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1034 	  WM_T_80003,		WMP_F_SERDES },
   1035 
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1037 	  "Intel i82801H (M_AMT) LAN Controller",
   1038 	  WM_T_ICH8,		WMP_F_COPPER },
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1040 	  "Intel i82801H (AMT) LAN Controller",
   1041 	  WM_T_ICH8,		WMP_F_COPPER },
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1043 	  "Intel i82801H LAN Controller",
   1044 	  WM_T_ICH8,		WMP_F_COPPER },
   1045 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1046 	  "Intel i82801H (IFE) LAN Controller",
   1047 	  WM_T_ICH8,		WMP_F_COPPER },
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1049 	  "Intel i82801H (M) LAN Controller",
   1050 	  WM_T_ICH8,		WMP_F_COPPER },
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1052 	  "Intel i82801H IFE (GT) LAN Controller",
   1053 	  WM_T_ICH8,		WMP_F_COPPER },
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1055 	  "Intel i82801H IFE (G) LAN Controller",
   1056 	  WM_T_ICH8,		WMP_F_COPPER },
   1057 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1058 	  "82801I (AMT) LAN Controller",
   1059 	  WM_T_ICH9,		WMP_F_COPPER },
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1061 	  "82801I LAN Controller",
   1062 	  WM_T_ICH9,		WMP_F_COPPER },
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1064 	  "82801I (G) LAN Controller",
   1065 	  WM_T_ICH9,		WMP_F_COPPER },
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1067 	  "82801I (GT) LAN Controller",
   1068 	  WM_T_ICH9,		WMP_F_COPPER },
   1069 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1070 	  "82801I (C) LAN Controller",
   1071 	  WM_T_ICH9,		WMP_F_COPPER },
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1073 	  "82801I mobile LAN Controller",
   1074 	  WM_T_ICH9,		WMP_F_COPPER },
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1076 	  "82801I mobile (V) LAN Controller",
   1077 	  WM_T_ICH9,		WMP_F_COPPER },
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1079 	  "82801I mobile (AMT) LAN Controller",
   1080 	  WM_T_ICH9,		WMP_F_COPPER },
   1081 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1082 	  "82567LM-4 LAN Controller",
   1083 	  WM_T_ICH9,		WMP_F_COPPER },
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1085 	  "82567V-3 LAN Controller",
   1086 	  WM_T_ICH9,		WMP_F_COPPER },
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1088 	  "82567LM-2 LAN Controller",
   1089 	  WM_T_ICH10,		WMP_F_COPPER },
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1091 	  "82567LF-2 LAN Controller",
   1092 	  WM_T_ICH10,		WMP_F_COPPER },
   1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1094 	  "82567LM-3 LAN Controller",
   1095 	  WM_T_ICH10,		WMP_F_COPPER },
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1097 	  "82567LF-3 LAN Controller",
   1098 	  WM_T_ICH10,		WMP_F_COPPER },
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1100 	  "82567V-2 LAN Controller",
   1101 	  WM_T_ICH10,		WMP_F_COPPER },
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1103 	  "82567V-3? LAN Controller",
   1104 	  WM_T_ICH10,		WMP_F_COPPER },
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1106 	  "HANKSVILLE LAN Controller",
   1107 	  WM_T_ICH10,		WMP_F_COPPER },
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1109 	  "PCH LAN (82577LM) Controller",
   1110 	  WM_T_PCH,		WMP_F_COPPER },
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1112 	  "PCH LAN (82577LC) Controller",
   1113 	  WM_T_PCH,		WMP_F_COPPER },
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1115 	  "PCH LAN (82578DM) Controller",
   1116 	  WM_T_PCH,		WMP_F_COPPER },
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1118 	  "PCH LAN (82578DC) Controller",
   1119 	  WM_T_PCH,		WMP_F_COPPER },
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1121 	  "PCH2 LAN (82579LM) Controller",
   1122 	  WM_T_PCH2,		WMP_F_COPPER },
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1124 	  "PCH2 LAN (82579V) Controller",
   1125 	  WM_T_PCH2,		WMP_F_COPPER },
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1127 	  "82575EB dual-1000baseT Ethernet",
   1128 	  WM_T_82575,		WMP_F_COPPER },
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1130 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1131 	  WM_T_82575,		WMP_F_SERDES },
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1133 	  "82575GB quad-1000baseT Ethernet",
   1134 	  WM_T_82575,		WMP_F_COPPER },
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1136 	  "82575GB quad-1000baseT Ethernet (PM)",
   1137 	  WM_T_82575,		WMP_F_COPPER },
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1139 	  "82576 1000BaseT Ethernet",
   1140 	  WM_T_82576,		WMP_F_COPPER },
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1142 	  "82576 1000BaseX Ethernet",
   1143 	  WM_T_82576,		WMP_F_FIBER },
   1144 
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1146 	  "82576 gigabit Ethernet (SERDES)",
   1147 	  WM_T_82576,		WMP_F_SERDES },
   1148 
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1150 	  "82576 quad-1000BaseT Ethernet",
   1151 	  WM_T_82576,		WMP_F_COPPER },
   1152 
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1154 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1155 	  WM_T_82576,		WMP_F_COPPER },
   1156 
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1158 	  "82576 gigabit Ethernet",
   1159 	  WM_T_82576,		WMP_F_COPPER },
   1160 
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1162 	  "82576 gigabit Ethernet (SERDES)",
   1163 	  WM_T_82576,		WMP_F_SERDES },
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1165 	  "82576 quad-gigabit Ethernet (SERDES)",
   1166 	  WM_T_82576,		WMP_F_SERDES },
   1167 
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1169 	  "82580 1000BaseT Ethernet",
   1170 	  WM_T_82580,		WMP_F_COPPER },
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1172 	  "82580 1000BaseX Ethernet",
   1173 	  WM_T_82580,		WMP_F_FIBER },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1176 	  "82580 1000BaseT Ethernet (SERDES)",
   1177 	  WM_T_82580,		WMP_F_SERDES },
   1178 
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1180 	  "82580 gigabit Ethernet (SGMII)",
   1181 	  WM_T_82580,		WMP_F_COPPER },
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1183 	  "82580 dual-1000BaseT Ethernet",
   1184 	  WM_T_82580,		WMP_F_COPPER },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1187 	  "82580 quad-1000BaseX Ethernet",
   1188 	  WM_T_82580,		WMP_F_FIBER },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1191 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1192 	  WM_T_82580,		WMP_F_COPPER },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1195 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1196 	  WM_T_82580,		WMP_F_SERDES },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1199 	  "DH89XXCC 1000BASE-KX Ethernet",
   1200 	  WM_T_82580,		WMP_F_SERDES },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1203 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1204 	  WM_T_82580,		WMP_F_SERDES },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1207 	  "I350 Gigabit Network Connection",
   1208 	  WM_T_I350,		WMP_F_COPPER },
   1209 
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1211 	  "I350 Gigabit Fiber Network Connection",
   1212 	  WM_T_I350,		WMP_F_FIBER },
   1213 
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1215 	  "I350 Gigabit Backplane Connection",
   1216 	  WM_T_I350,		WMP_F_SERDES },
   1217 
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1219 	  "I350 Quad Port Gigabit Ethernet",
   1220 	  WM_T_I350,		WMP_F_SERDES },
   1221 
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1223 	  "I350 Gigabit Connection",
   1224 	  WM_T_I350,		WMP_F_COPPER },
   1225 
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1227 	  "I354 Gigabit Ethernet (KX)",
   1228 	  WM_T_I354,		WMP_F_SERDES },
   1229 
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1231 	  "I354 Gigabit Ethernet (SGMII)",
   1232 	  WM_T_I354,		WMP_F_COPPER },
   1233 
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1235 	  "I354 Gigabit Ethernet (2.5G)",
   1236 	  WM_T_I354,		WMP_F_COPPER },
   1237 
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1239 	  "I210-T1 Ethernet Server Adapter",
   1240 	  WM_T_I210,		WMP_F_COPPER },
   1241 
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1243 	  "I210 Ethernet (Copper OEM)",
   1244 	  WM_T_I210,		WMP_F_COPPER },
   1245 
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1247 	  "I210 Ethernet (Copper IT)",
   1248 	  WM_T_I210,		WMP_F_COPPER },
   1249 
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1251 	  "I210 Ethernet (FLASH less)",
   1252 	  WM_T_I210,		WMP_F_COPPER },
   1253 
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1255 	  "I210 Gigabit Ethernet (Fiber)",
   1256 	  WM_T_I210,		WMP_F_FIBER },
   1257 
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1259 	  "I210 Gigabit Ethernet (SERDES)",
   1260 	  WM_T_I210,		WMP_F_SERDES },
   1261 
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1263 	  "I210 Gigabit Ethernet (FLASH less)",
   1264 	  WM_T_I210,		WMP_F_SERDES },
   1265 
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1267 	  "I210 Gigabit Ethernet (SGMII)",
   1268 	  WM_T_I210,		WMP_F_COPPER },
   1269 
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1271 	  "I211 Ethernet (COPPER)",
   1272 	  WM_T_I211,		WMP_F_COPPER },
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1274 	  "I217 V Ethernet Connection",
   1275 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1277 	  "I217 LM Ethernet Connection",
   1278 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1280 	  "I218 V Ethernet Connection",
   1281 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1283 	  "I218 V Ethernet Connection",
   1284 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1286 	  "I218 V Ethernet Connection",
   1287 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1288 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1289 	  "I218 LM Ethernet Connection",
   1290 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1292 	  "I218 LM Ethernet Connection",
   1293 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1295 	  "I218 LM Ethernet Connection",
   1296 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1297 #if 0
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1299 	  "I219 V Ethernet Connection",
   1300 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1302 	  "I219 V Ethernet Connection",
   1303 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1305 	  "I219 LM Ethernet Connection",
   1306 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1308 	  "I219 LM Ethernet Connection",
   1309 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1310 #endif
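         	/*
         	 * Sentinel entry; wm_lookup() stops scanning the table when
         	 * it reaches the NULL wmp_name.
         	 */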
   1311 	{ 0,			0,
   1312 	  NULL,
   1313 	  0,			0 },
   1314 };
   1315 
   1316 #ifdef WM_EVENT_COUNTERS
   1317 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1318 #endif /* WM_EVENT_COUNTERS */
   1319 
   1320 
   1321 /*
    1322  * Register read/write functions, other than CSR_{READ|WRITE}().
   1324  */
   1325 
   1326 #if 0 /* Not currently used */
   1327 static inline uint32_t
   1328 wm_io_read(struct wm_softc *sc, int reg)
   1329 {
   1330 
   1331 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1332 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1333 }
   1334 #endif
   1335 
   1336 static inline void
   1337 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1338 {
   1339 
   1340 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1341 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1342 }
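
         /*
          * The pair above implements the classic two-register indirect I/O
          * window: the register offset is written at BAR offset 0 and the
          * data is then transferred through BAR offset 4.  A minimal sketch
          * of an indirect status-register read, using the same bus_space
          * calls as above:
          *
          *	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, WMREG_STATUS);
          *	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4);
          */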
   1343 
   1344 static inline void
   1345 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1346     uint32_t data)
   1347 {
   1348 	uint32_t regval;
   1349 	int i;
   1350 
   1351 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1352 
   1353 	CSR_WRITE(sc, reg, regval);
   1354 
   1355 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1356 		delay(5);
   1357 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1358 			break;
   1359 	}
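         	/*
         	 * The loop above polls every 5us, so the worst-case wait
         	 * before the warning below fires is SCTL_CTL_POLL_TIMEOUT * 5
         	 * microseconds.
         	 */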
   1360 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1361 		aprint_error("%s: WARNING:"
   1362 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1363 		    device_xname(sc->sc_dev), reg);
   1364 	}
   1365 }
   1366 
   1367 static inline void
   1368 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1369 {
   1370 	wa->wa_low = htole32(v & 0xffffffffU);
   1371 	if (sizeof(bus_addr_t) == 8)
   1372 		wa->wa_high = htole32((uint64_t) v >> 32);
   1373 	else
   1374 		wa->wa_high = 0;
   1375 }
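
         /*
          * Worked example for wm_set_dma_addr() (illustrative values only):
          * with a 64-bit bus_addr_t, v = 0x123456789a stores
          * wa_low = htole32(0x3456789a) and wa_high = htole32(0x12); with a
          * 32-bit bus_addr_t the high word is simply zero.  The descriptor
          * fields are little-endian regardless of host byte order, hence
          * the htole32().
          */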
   1376 
   1377 /*
   1378  * Descriptor sync/init functions.
   1379  */
   1380 static inline void
   1381 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1382 {
   1383 	struct wm_softc *sc = txq->txq_sc;
   1384 
   1385 	/* If it will wrap around, sync to the end of the ring. */
   1386 	if ((start + num) > WM_NTXDESC(txq)) {
   1387 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1388 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1389 		    (WM_NTXDESC(txq) - start), ops);
   1390 		num -= (WM_NTXDESC(txq) - start);
   1391 		start = 0;
   1392 	}
   1393 
   1394 	/* Now sync whatever is left. */
   1395 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1396 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1397 }
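
         /*
          * Example of the wrap-around split above (illustrative numbers):
          * on a 256-descriptor ring, start = 250 and num = 10 is split into
          * one sync covering descriptors 250..255 and a second covering
          * descriptors 0..3.
          */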
   1398 
   1399 static inline void
   1400 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1401 {
   1402 	struct wm_softc *sc = rxq->rxq_sc;
   1403 
   1404 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1405 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1406 }
   1407 
   1408 static inline void
   1409 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1410 {
   1411 	struct wm_softc *sc = rxq->rxq_sc;
   1412 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1413 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1414 	struct mbuf *m = rxs->rxs_mbuf;
   1415 
   1416 	/*
   1417 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1418 	 * so that the payload after the Ethernet header is aligned
   1419 	 * to a 4-byte boundary.
    1420 	 *
   1421 	 * XXX BRAINDAMAGE ALERT!
   1422 	 * The stupid chip uses the same size for every buffer, which
   1423 	 * is set in the Receive Control register.  We are using the 2K
   1424 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1425 	 * reason, we can't "scoot" packets longer than the standard
   1426 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1427 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1428 	 * the upper layer copy the headers.
   1429 	 */
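         	/*
         	 * Illustrative arithmetic: the Ethernet header is 14 bytes, so
         	 * with sc_align_tweak == 2 the IP header starts at buffer
         	 * offset 16, a 4-byte boundary; with a tweak of 0 it would
         	 * start at offset 14 and trap strict-alignment CPUs.
         	 */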
   1430 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1431 
   1432 	wm_set_dma_addr(&rxd->wrx_addr,
   1433 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1434 	rxd->wrx_len = 0;
   1435 	rxd->wrx_cksum = 0;
   1436 	rxd->wrx_status = 0;
   1437 	rxd->wrx_errors = 0;
   1438 	rxd->wrx_special = 0;
   1439 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1440 
   1441 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1442 }
   1443 
   1444 /*
   1445  * Device driver interface functions and commonly used functions.
   1446  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1447  */
   1448 
   1449 /* Lookup supported device table */
   1450 static const struct wm_product *
   1451 wm_lookup(const struct pci_attach_args *pa)
   1452 {
   1453 	const struct wm_product *wmp;
   1454 
   1455 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1456 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1457 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1458 			return wmp;
   1459 	}
   1460 	return NULL;
   1461 }
   1462 
   1463 /* The match function (ca_match) */
   1464 static int
   1465 wm_match(device_t parent, cfdata_t cf, void *aux)
   1466 {
   1467 	struct pci_attach_args *pa = aux;
   1468 
   1469 	if (wm_lookup(pa) != NULL)
   1470 		return 1;
   1471 
   1472 	return 0;
   1473 }
   1474 
   1475 /* The attach function (ca_attach) */
   1476 static void
   1477 wm_attach(device_t parent, device_t self, void *aux)
   1478 {
   1479 	struct wm_softc *sc = device_private(self);
   1480 	struct pci_attach_args *pa = aux;
   1481 	prop_dictionary_t dict;
   1482 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1483 	pci_chipset_tag_t pc = pa->pa_pc;
   1484 	int counts[PCI_INTR_TYPE_SIZE];
   1485 	pci_intr_type_t max_type;
   1486 	const char *eetype, *xname;
   1487 	bus_space_tag_t memt;
   1488 	bus_space_handle_t memh;
   1489 	bus_size_t memsize;
   1490 	int memh_valid;
   1491 	int i, error;
   1492 	const struct wm_product *wmp;
   1493 	prop_data_t ea;
   1494 	prop_number_t pn;
   1495 	uint8_t enaddr[ETHER_ADDR_LEN];
   1496 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1497 	pcireg_t preg, memtype;
   1498 	uint16_t eeprom_data, apme_mask;
   1499 	bool force_clear_smbi;
   1500 	uint32_t link_mode;
   1501 	uint32_t reg;
   1502 
   1503 	sc->sc_dev = self;
   1504 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1505 	sc->sc_stopping = false;
   1506 
   1507 	wmp = wm_lookup(pa);
   1508 #ifdef DIAGNOSTIC
   1509 	if (wmp == NULL) {
   1510 		printf("\n");
   1511 		panic("wm_attach: impossible");
   1512 	}
   1513 #endif
   1514 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1515 
   1516 	sc->sc_pc = pa->pa_pc;
   1517 	sc->sc_pcitag = pa->pa_tag;
   1518 
   1519 	if (pci_dma64_available(pa))
   1520 		sc->sc_dmat = pa->pa_dmat64;
   1521 	else
   1522 		sc->sc_dmat = pa->pa_dmat;
   1523 
   1524 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1525 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1526 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1527 
   1528 	sc->sc_type = wmp->wmp_type;
   1529 	if (sc->sc_type < WM_T_82543) {
   1530 		if (sc->sc_rev < 2) {
   1531 			aprint_error_dev(sc->sc_dev,
   1532 			    "i82542 must be at least rev. 2\n");
   1533 			return;
   1534 		}
   1535 		if (sc->sc_rev < 3)
   1536 			sc->sc_type = WM_T_82542_2_0;
   1537 	}
   1538 
   1539 	/*
   1540 	 * Disable MSI for Errata:
   1541 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1542 	 *
   1543 	 *  82544: Errata 25
   1544 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1545 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1546 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1547 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1548 	 *
   1549 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1550 	 *
   1551 	 *  82571 & 82572: Errata 63
   1552 	 */
   1553 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1554 	    || (sc->sc_type == WM_T_82572))
   1555 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1556 
   1557 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1558 	    || (sc->sc_type == WM_T_82580)
   1559 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1560 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1561 		sc->sc_flags |= WM_F_NEWQUEUE;
   1562 
   1563 	/* Set device properties (mactype) */
   1564 	dict = device_properties(sc->sc_dev);
   1565 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1566 
   1567 	/*
    1568 	 * Map the device.  All devices support memory-mapped access,
   1569 	 * and it is really required for normal operation.
   1570 	 */
   1571 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1572 	switch (memtype) {
   1573 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1574 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1575 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1576 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1577 		break;
   1578 	default:
   1579 		memh_valid = 0;
   1580 		break;
   1581 	}
   1582 
   1583 	if (memh_valid) {
   1584 		sc->sc_st = memt;
   1585 		sc->sc_sh = memh;
   1586 		sc->sc_ss = memsize;
   1587 	} else {
   1588 		aprint_error_dev(sc->sc_dev,
   1589 		    "unable to map device registers\n");
   1590 		return;
   1591 	}
   1592 
   1593 	/*
   1594 	 * In addition, i82544 and later support I/O mapped indirect
   1595 	 * register access.  It is not desirable (nor supported in
   1596 	 * this driver) to use it for normal operation, though it is
   1597 	 * required to work around bugs in some chip versions.
   1598 	 */
   1599 	if (sc->sc_type >= WM_T_82544) {
   1600 		/* First we have to find the I/O BAR. */
   1601 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1602 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1603 			if (memtype == PCI_MAPREG_TYPE_IO)
   1604 				break;
   1605 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1606 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1607 				i += 4;	/* skip high bits, too */
   1608 		}
   1609 		if (i < PCI_MAPREG_END) {
   1610 			/*
    1611 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1612 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    1613 			 * that's no problem, because newer chips don't have
    1614 			 * this bug.
    1615 			 *
    1616 			 * The i8254x apparently doesn't respond when the
    1617 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1618 			 * been configured.
   1619 			 */
   1620 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1621 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1622 				aprint_error_dev(sc->sc_dev,
   1623 				    "WARNING: I/O BAR at zero.\n");
   1624 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1625 					0, &sc->sc_iot, &sc->sc_ioh,
   1626 					NULL, &sc->sc_ios) == 0) {
   1627 				sc->sc_flags |= WM_F_IOH_VALID;
   1628 			} else {
   1629 				aprint_error_dev(sc->sc_dev,
   1630 				    "WARNING: unable to map I/O space\n");
   1631 			}
   1632 		}
   1633 
   1634 	}
   1635 
   1636 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1637 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1638 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1639 	if (sc->sc_type < WM_T_82542_2_1)
   1640 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1641 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1642 
   1643 	/* power up chip */
   1644 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1645 	    NULL)) && error != EOPNOTSUPP) {
   1646 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1647 		return;
   1648 	}
   1649 
   1650 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1651 
   1652 	/* Allocation settings */
   1653 	max_type = PCI_INTR_TYPE_MSIX;
   1654 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   1655 	counts[PCI_INTR_TYPE_MSI] = 1;
   1656 	counts[PCI_INTR_TYPE_INTX] = 1;
   1657 
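         	/*
         	 * The allocation below falls back MSI-X -> MSI -> INTx: when a
         	 * wm_setup_*() call fails, the vectors just allocated are
         	 * released, max_type is lowered one level and we jump back to
         	 * alloc_retry.  The MSI-X count above asks for one vector per
         	 * TX queue, one per RX queue and one extra (apparently for
         	 * link/other events).
         	 */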
   1658 alloc_retry:
   1659 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1660 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1661 		return;
   1662 	}
   1663 
   1664 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1665 		error = wm_setup_msix(sc);
   1666 		if (error) {
   1667 			pci_intr_release(pc, sc->sc_intrs,
   1668 			    counts[PCI_INTR_TYPE_MSIX]);
   1669 
   1670 			/* Setup for MSI: Disable MSI-X */
   1671 			max_type = PCI_INTR_TYPE_MSI;
   1672 			counts[PCI_INTR_TYPE_MSI] = 1;
   1673 			counts[PCI_INTR_TYPE_INTX] = 1;
   1674 			goto alloc_retry;
   1675 		}
    1676 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1677 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1678 		error = wm_setup_legacy(sc);
   1679 		if (error) {
   1680 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1681 			    counts[PCI_INTR_TYPE_MSI]);
   1682 
   1683 			/* The next try is for INTx: Disable MSI */
   1684 			max_type = PCI_INTR_TYPE_INTX;
   1685 			counts[PCI_INTR_TYPE_INTX] = 1;
   1686 			goto alloc_retry;
   1687 		}
   1688 	} else {
   1689 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1690 		error = wm_setup_legacy(sc);
   1691 		if (error) {
   1692 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1693 			    counts[PCI_INTR_TYPE_INTX]);
   1694 			return;
   1695 		}
   1696 	}
   1697 
   1698 	/*
   1699 	 * Check the function ID (unit number of the chip).
   1700 	 */
   1701 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1702 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1703 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1704 	    || (sc->sc_type == WM_T_82580)
   1705 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1706 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1707 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1708 	else
   1709 		sc->sc_funcid = 0;
   1710 
   1711 	/*
   1712 	 * Determine a few things about the bus we're connected to.
   1713 	 */
   1714 	if (sc->sc_type < WM_T_82543) {
   1715 		/* We don't really know the bus characteristics here. */
   1716 		sc->sc_bus_speed = 33;
   1717 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1718 		/*
    1719 		 * CSA (Communication Streaming Architecture) is about as fast
    1720 		 * as a 32-bit 66MHz PCI bus.
   1721 		 */
   1722 		sc->sc_flags |= WM_F_CSA;
   1723 		sc->sc_bus_speed = 66;
   1724 		aprint_verbose_dev(sc->sc_dev,
   1725 		    "Communication Streaming Architecture\n");
   1726 		if (sc->sc_type == WM_T_82547) {
   1727 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1728 			callout_setfunc(&sc->sc_txfifo_ch,
   1729 					wm_82547_txfifo_stall, sc);
   1730 			aprint_verbose_dev(sc->sc_dev,
   1731 			    "using 82547 Tx FIFO stall work-around\n");
   1732 		}
   1733 	} else if (sc->sc_type >= WM_T_82571) {
   1734 		sc->sc_flags |= WM_F_PCIE;
   1735 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1736 		    && (sc->sc_type != WM_T_ICH10)
   1737 		    && (sc->sc_type != WM_T_PCH)
   1738 		    && (sc->sc_type != WM_T_PCH2)
   1739 		    && (sc->sc_type != WM_T_PCH_LPT)
   1740 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1741 			/* ICH* and PCH* have no PCIe capability registers */
   1742 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1743 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1744 				NULL) == 0)
   1745 				aprint_error_dev(sc->sc_dev,
   1746 				    "unable to find PCIe capability\n");
   1747 		}
   1748 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1749 	} else {
   1750 		reg = CSR_READ(sc, WMREG_STATUS);
   1751 		if (reg & STATUS_BUS64)
   1752 			sc->sc_flags |= WM_F_BUS64;
   1753 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1754 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1755 
   1756 			sc->sc_flags |= WM_F_PCIX;
   1757 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1758 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1759 				aprint_error_dev(sc->sc_dev,
   1760 				    "unable to find PCIX capability\n");
   1761 			else if (sc->sc_type != WM_T_82545_3 &&
   1762 				 sc->sc_type != WM_T_82546_3) {
   1763 				/*
   1764 				 * Work around a problem caused by the BIOS
   1765 				 * setting the max memory read byte count
   1766 				 * incorrectly.
   1767 				 */
   1768 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1769 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1770 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1771 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1772 
   1773 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1774 				    PCIX_CMD_BYTECNT_SHIFT;
   1775 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1776 				    PCIX_STATUS_MAXB_SHIFT;
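         				/*
         				 * MMRBC is encoded as a power of two:
         				 * 0 -> 512, 1 -> 1024, 2 -> 2048 and
         				 * 3 -> 4096 bytes, hence the
         				 * "512 << bytecnt" in the message below.
         				 */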
   1777 				if (bytecnt > maxb) {
   1778 					aprint_verbose_dev(sc->sc_dev,
   1779 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1780 					    512 << bytecnt, 512 << maxb);
   1781 					pcix_cmd = (pcix_cmd &
   1782 					    ~PCIX_CMD_BYTECNT_MASK) |
   1783 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1784 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1785 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1786 					    pcix_cmd);
   1787 				}
   1788 			}
   1789 		}
   1790 		/*
   1791 		 * The quad port adapter is special; it has a PCIX-PCIX
   1792 		 * bridge on the board, and can run the secondary bus at
   1793 		 * a higher speed.
   1794 		 */
   1795 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1796 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1797 								      : 66;
   1798 		} else if (sc->sc_flags & WM_F_PCIX) {
   1799 			switch (reg & STATUS_PCIXSPD_MASK) {
   1800 			case STATUS_PCIXSPD_50_66:
   1801 				sc->sc_bus_speed = 66;
   1802 				break;
   1803 			case STATUS_PCIXSPD_66_100:
   1804 				sc->sc_bus_speed = 100;
   1805 				break;
   1806 			case STATUS_PCIXSPD_100_133:
   1807 				sc->sc_bus_speed = 133;
   1808 				break;
   1809 			default:
   1810 				aprint_error_dev(sc->sc_dev,
   1811 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1812 				    reg & STATUS_PCIXSPD_MASK);
   1813 				sc->sc_bus_speed = 66;
   1814 				break;
   1815 			}
   1816 		} else
   1817 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1818 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1819 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1820 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1821 	}
   1822 
   1823 	/* clear interesting stat counters */
   1824 	CSR_READ(sc, WMREG_COLC);
   1825 	CSR_READ(sc, WMREG_RXERRC);
   1826 
   1827 	/* get PHY control from SMBus to PCIe */
   1828 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1829 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1830 		wm_smbustopci(sc);
   1831 
   1832 	/* Reset the chip to a known state. */
   1833 	wm_reset(sc);
   1834 
   1835 	/* Get some information about the EEPROM. */
   1836 	switch (sc->sc_type) {
   1837 	case WM_T_82542_2_0:
   1838 	case WM_T_82542_2_1:
   1839 	case WM_T_82543:
   1840 	case WM_T_82544:
   1841 		/* Microwire */
   1842 		sc->sc_nvm_wordsize = 64;
   1843 		sc->sc_nvm_addrbits = 6;
   1844 		break;
   1845 	case WM_T_82540:
   1846 	case WM_T_82545:
   1847 	case WM_T_82545_3:
   1848 	case WM_T_82546:
   1849 	case WM_T_82546_3:
   1850 		/* Microwire */
   1851 		reg = CSR_READ(sc, WMREG_EECD);
   1852 		if (reg & EECD_EE_SIZE) {
   1853 			sc->sc_nvm_wordsize = 256;
   1854 			sc->sc_nvm_addrbits = 8;
   1855 		} else {
   1856 			sc->sc_nvm_wordsize = 64;
   1857 			sc->sc_nvm_addrbits = 6;
   1858 		}
   1859 		sc->sc_flags |= WM_F_LOCK_EECD;
   1860 		break;
   1861 	case WM_T_82541:
   1862 	case WM_T_82541_2:
   1863 	case WM_T_82547:
   1864 	case WM_T_82547_2:
   1865 		sc->sc_flags |= WM_F_LOCK_EECD;
   1866 		reg = CSR_READ(sc, WMREG_EECD);
   1867 		if (reg & EECD_EE_TYPE) {
   1868 			/* SPI */
   1869 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1870 			wm_nvm_set_addrbits_size_eecd(sc);
   1871 		} else {
   1872 			/* Microwire */
   1873 			if ((reg & EECD_EE_ABITS) != 0) {
   1874 				sc->sc_nvm_wordsize = 256;
   1875 				sc->sc_nvm_addrbits = 8;
   1876 			} else {
   1877 				sc->sc_nvm_wordsize = 64;
   1878 				sc->sc_nvm_addrbits = 6;
   1879 			}
   1880 		}
   1881 		break;
   1882 	case WM_T_82571:
   1883 	case WM_T_82572:
   1884 		/* SPI */
   1885 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1886 		wm_nvm_set_addrbits_size_eecd(sc);
   1887 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1888 		break;
   1889 	case WM_T_82573:
   1890 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1891 		/* FALLTHROUGH */
   1892 	case WM_T_82574:
   1893 	case WM_T_82583:
   1894 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1895 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1896 			sc->sc_nvm_wordsize = 2048;
   1897 		} else {
   1898 			/* SPI */
   1899 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1900 			wm_nvm_set_addrbits_size_eecd(sc);
   1901 		}
   1902 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1903 		break;
   1904 	case WM_T_82575:
   1905 	case WM_T_82576:
   1906 	case WM_T_82580:
   1907 	case WM_T_I350:
   1908 	case WM_T_I354:
   1909 	case WM_T_80003:
   1910 		/* SPI */
   1911 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1912 		wm_nvm_set_addrbits_size_eecd(sc);
   1913 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1914 		    | WM_F_LOCK_SWSM;
   1915 		break;
   1916 	case WM_T_ICH8:
   1917 	case WM_T_ICH9:
   1918 	case WM_T_ICH10:
   1919 	case WM_T_PCH:
   1920 	case WM_T_PCH2:
   1921 	case WM_T_PCH_LPT:
   1922 		/* FLASH */
   1923 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1924 		sc->sc_nvm_wordsize = 2048;
   1925 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   1926 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1927 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1928 			aprint_error_dev(sc->sc_dev,
   1929 			    "can't map FLASH registers\n");
   1930 			goto out;
   1931 		}
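         		/*
         		 * GFPREG describes the GbE flash region in 4KB sectors:
         		 * the ICH_GFPREG_BASE_MASK-wide fields at bits 0 and 16
         		 * hold the first and last sector.  The computation below
         		 * turns that into the size of one of the two banks, in
         		 * 16-bit words.  Illustrative numbers only: base 1 and
         		 * limit 31 give 31 sectors = 126976 bytes, i.e. 31744
         		 * words per bank after dividing by two banks of two-byte
         		 * words.
         		 */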
   1932 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1933 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1934 		    ICH_FLASH_SECTOR_SIZE;
   1935 		sc->sc_ich8_flash_bank_size =
   1936 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1937 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   1938 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1939 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1940 		sc->sc_flashreg_offset = 0;
   1941 		break;
   1942 	case WM_T_PCH_SPT:
   1943 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   1944 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1945 		sc->sc_flasht = sc->sc_st;
   1946 		sc->sc_flashh = sc->sc_sh;
   1947 		sc->sc_ich8_flash_base = 0;
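         		/*
         		 * Illustrative arithmetic for the STRAP decoding below:
         		 * a 5-bit field value of 7 yields (7 + 1) *
         		 * NVM_SIZE_MULTIPLIER bytes, which the following
         		 * statements halve into 16-bit words and then split
         		 * across the two assumed banks.
         		 */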
   1948 		sc->sc_nvm_wordsize =
   1949 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   1950 			* NVM_SIZE_MULTIPLIER;
    1951 		/* It is the size in bytes; we want words */
   1952 		sc->sc_nvm_wordsize /= 2;
   1953 		/* assume 2 banks */
   1954 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   1955 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   1956 		break;
   1957 	case WM_T_I210:
   1958 	case WM_T_I211:
   1959 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1960 			wm_nvm_set_addrbits_size_eecd(sc);
   1961 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1962 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1963 		} else {
   1964 			sc->sc_nvm_wordsize = INVM_SIZE;
   1965 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1966 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1967 		}
   1968 		break;
   1969 	default:
   1970 		break;
   1971 	}
   1972 
   1973 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1974 	switch (sc->sc_type) {
   1975 	case WM_T_82571:
   1976 	case WM_T_82572:
   1977 		reg = CSR_READ(sc, WMREG_SWSM2);
   1978 		if ((reg & SWSM2_LOCK) == 0) {
   1979 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1980 			force_clear_smbi = true;
   1981 		} else
   1982 			force_clear_smbi = false;
   1983 		break;
   1984 	case WM_T_82573:
   1985 	case WM_T_82574:
   1986 	case WM_T_82583:
   1987 		force_clear_smbi = true;
   1988 		break;
   1989 	default:
   1990 		force_clear_smbi = false;
   1991 		break;
   1992 	}
   1993 	if (force_clear_smbi) {
   1994 		reg = CSR_READ(sc, WMREG_SWSM);
   1995 		if ((reg & SWSM_SMBI) != 0)
   1996 			aprint_error_dev(sc->sc_dev,
   1997 			    "Please update the Bootagent\n");
   1998 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1999 	}
   2000 
    2001 	/*
    2002 	 * Defer printing the EEPROM type until after verifying the checksum.
    2003 	 * This allows the EEPROM type to be printed correctly in the case
    2004 	 * that no EEPROM is attached.
    2005 	 *
    2006 	 * Validate the EEPROM checksum. If the checksum fails, flag
    2007 	 * this for later, so we can fail future reads from the EEPROM.
    2008 	 */
   2010 	if (wm_nvm_validate_checksum(sc)) {
   2011 		/*
   2012 		 * Read twice again because some PCI-e parts fail the
   2013 		 * first check due to the link being in sleep state.
   2014 		 */
   2015 		if (wm_nvm_validate_checksum(sc))
   2016 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2017 	}
   2018 
   2019 	/* Set device properties (macflags) */
   2020 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2021 
   2022 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2023 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2024 	else {
   2025 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2026 		    sc->sc_nvm_wordsize);
   2027 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2028 			aprint_verbose("iNVM");
   2029 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2030 			aprint_verbose("FLASH(HW)");
   2031 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2032 			aprint_verbose("FLASH");
   2033 		else {
   2034 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2035 				eetype = "SPI";
   2036 			else
   2037 				eetype = "MicroWire";
   2038 			aprint_verbose("(%d address bits) %s EEPROM",
   2039 			    sc->sc_nvm_addrbits, eetype);
   2040 		}
   2041 	}
   2042 	wm_nvm_version(sc);
   2043 	aprint_verbose("\n");
   2044 
   2045 	/* Check for I21[01] PLL workaround */
   2046 	if (sc->sc_type == WM_T_I210)
   2047 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2048 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
    2049 		/* NVM images 3.25 and newer include the workaround */
   2050 		if ((sc->sc_nvm_ver_major < 3)
   2051 		    || ((sc->sc_nvm_ver_major == 3)
   2052 			&& (sc->sc_nvm_ver_minor < 25))) {
   2053 			aprint_verbose_dev(sc->sc_dev,
   2054 			    "ROM image version %d.%d is older than 3.25\n",
   2055 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2056 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2057 		}
   2058 	}
   2059 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2060 		wm_pll_workaround_i210(sc);
   2061 
   2062 	wm_get_wakeup(sc);
   2063 	switch (sc->sc_type) {
   2064 	case WM_T_82571:
   2065 	case WM_T_82572:
   2066 	case WM_T_82573:
   2067 	case WM_T_82574:
   2068 	case WM_T_82583:
   2069 	case WM_T_80003:
   2070 	case WM_T_ICH8:
   2071 	case WM_T_ICH9:
   2072 	case WM_T_ICH10:
   2073 	case WM_T_PCH:
   2074 	case WM_T_PCH2:
   2075 	case WM_T_PCH_LPT:
   2076 	case WM_T_PCH_SPT:
   2077 		/* Non-AMT based hardware can now take control from firmware */
   2078 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2079 			wm_get_hw_control(sc);
   2080 		break;
   2081 	default:
   2082 		break;
   2083 	}
   2084 
   2085 	/*
    2086 	 * Read the Ethernet address from the EEPROM, unless it was
    2087 	 * already provided via device properties.
   2088 	 */
   2089 	ea = prop_dictionary_get(dict, "mac-address");
   2090 	if (ea != NULL) {
   2091 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2092 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2093 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2094 	} else {
   2095 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2096 			aprint_error_dev(sc->sc_dev,
   2097 			    "unable to read Ethernet address\n");
   2098 			goto out;
   2099 		}
   2100 	}
   2101 
   2102 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2103 	    ether_sprintf(enaddr));
   2104 
   2105 	/*
   2106 	 * Read the config info from the EEPROM, and set up various
   2107 	 * bits in the control registers based on their contents.
   2108 	 */
   2109 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2110 	if (pn != NULL) {
   2111 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2112 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2113 	} else {
   2114 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2115 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2116 			goto out;
   2117 		}
   2118 	}
   2119 
   2120 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2121 	if (pn != NULL) {
   2122 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2123 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2124 	} else {
   2125 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2126 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2127 			goto out;
   2128 		}
   2129 	}
   2130 
   2131 	/* check for WM_F_WOL */
   2132 	switch (sc->sc_type) {
   2133 	case WM_T_82542_2_0:
   2134 	case WM_T_82542_2_1:
   2135 	case WM_T_82543:
   2136 		/* dummy? */
   2137 		eeprom_data = 0;
   2138 		apme_mask = NVM_CFG3_APME;
   2139 		break;
   2140 	case WM_T_82544:
   2141 		apme_mask = NVM_CFG2_82544_APM_EN;
   2142 		eeprom_data = cfg2;
   2143 		break;
   2144 	case WM_T_82546:
   2145 	case WM_T_82546_3:
   2146 	case WM_T_82571:
   2147 	case WM_T_82572:
   2148 	case WM_T_82573:
   2149 	case WM_T_82574:
   2150 	case WM_T_82583:
   2151 	case WM_T_80003:
   2152 	default:
   2153 		apme_mask = NVM_CFG3_APME;
   2154 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2155 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2156 		break;
   2157 	case WM_T_82575:
   2158 	case WM_T_82576:
   2159 	case WM_T_82580:
   2160 	case WM_T_I350:
   2161 	case WM_T_I354: /* XXX ok? */
   2162 	case WM_T_ICH8:
   2163 	case WM_T_ICH9:
   2164 	case WM_T_ICH10:
   2165 	case WM_T_PCH:
   2166 	case WM_T_PCH2:
   2167 	case WM_T_PCH_LPT:
   2168 	case WM_T_PCH_SPT:
   2169 		/* XXX The funcid should be checked on some devices */
   2170 		apme_mask = WUC_APME;
   2171 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2172 		break;
   2173 	}
   2174 
    2175 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2176 	if ((eeprom_data & apme_mask) != 0)
   2177 		sc->sc_flags |= WM_F_WOL;
   2178 #ifdef WM_DEBUG
   2179 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2180 		printf("WOL\n");
   2181 #endif
   2182 
   2183 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2184 		/* Check NVM for autonegotiation */
   2185 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2186 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2187 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2188 		}
   2189 	}
   2190 
   2191 	/*
    2192 	 * XXX need special handling for some multi-port cards
    2193 	 * to disable a particular port.
   2194 	 */
   2195 
   2196 	if (sc->sc_type >= WM_T_82544) {
   2197 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2198 		if (pn != NULL) {
   2199 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2200 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2201 		} else {
   2202 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2203 				aprint_error_dev(sc->sc_dev,
   2204 				    "unable to read SWDPIN\n");
   2205 				goto out;
   2206 			}
   2207 		}
   2208 	}
   2209 
   2210 	if (cfg1 & NVM_CFG1_ILOS)
   2211 		sc->sc_ctrl |= CTRL_ILOS;
   2212 
   2213 	/*
   2214 	 * XXX
    2215 	 * This code isn't correct because pins 2 and 3 are located
    2216 	 * in different positions on newer chips. Check all the datasheets.
    2217 	 *
    2218 	 * Until this problem is resolved, only apply it to chips <= 82580.
   2219 	 */
   2220 	if (sc->sc_type <= WM_T_82580) {
   2221 		if (sc->sc_type >= WM_T_82544) {
   2222 			sc->sc_ctrl |=
   2223 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2224 			    CTRL_SWDPIO_SHIFT;
   2225 			sc->sc_ctrl |=
   2226 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2227 			    CTRL_SWDPINS_SHIFT;
   2228 		} else {
   2229 			sc->sc_ctrl |=
   2230 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2231 			    CTRL_SWDPIO_SHIFT;
   2232 		}
   2233 	}
   2234 
   2235 	/* XXX For other than 82580? */
   2236 	if (sc->sc_type == WM_T_82580) {
   2237 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2238 		if (nvmword & __BIT(13))
   2239 			sc->sc_ctrl |= CTRL_ILOS;
   2240 	}
   2241 
   2242 #if 0
   2243 	if (sc->sc_type >= WM_T_82544) {
   2244 		if (cfg1 & NVM_CFG1_IPS0)
   2245 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2246 		if (cfg1 & NVM_CFG1_IPS1)
   2247 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2248 		sc->sc_ctrl_ext |=
   2249 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2250 		    CTRL_EXT_SWDPIO_SHIFT;
   2251 		sc->sc_ctrl_ext |=
   2252 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2253 		    CTRL_EXT_SWDPINS_SHIFT;
   2254 	} else {
   2255 		sc->sc_ctrl_ext |=
   2256 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2257 		    CTRL_EXT_SWDPIO_SHIFT;
   2258 	}
   2259 #endif
   2260 
   2261 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2262 #if 0
   2263 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2264 #endif
   2265 
   2266 	if (sc->sc_type == WM_T_PCH) {
   2267 		uint16_t val;
   2268 
   2269 		/* Save the NVM K1 bit setting */
   2270 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2271 
   2272 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2273 			sc->sc_nvm_k1_enabled = 1;
   2274 		else
   2275 			sc->sc_nvm_k1_enabled = 0;
   2276 	}
   2277 
   2278 	/*
    2279 	 * Determine whether we're in TBI, GMII or SGMII mode, and
    2280 	 * initialize the media structures accordingly.
   2281 	 */
   2282 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2283 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2284 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2285 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2286 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2287 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2288 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2289 	} else if (sc->sc_type < WM_T_82543 ||
   2290 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2291 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2292 			aprint_error_dev(sc->sc_dev,
   2293 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2294 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2295 		}
   2296 		wm_tbi_mediainit(sc);
   2297 	} else {
   2298 		switch (sc->sc_type) {
   2299 		case WM_T_82575:
   2300 		case WM_T_82576:
   2301 		case WM_T_82580:
   2302 		case WM_T_I350:
   2303 		case WM_T_I354:
   2304 		case WM_T_I210:
   2305 		case WM_T_I211:
   2306 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2307 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2308 			switch (link_mode) {
   2309 			case CTRL_EXT_LINK_MODE_1000KX:
   2310 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2311 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2312 				break;
   2313 			case CTRL_EXT_LINK_MODE_SGMII:
   2314 				if (wm_sgmii_uses_mdio(sc)) {
   2315 					aprint_verbose_dev(sc->sc_dev,
   2316 					    "SGMII(MDIO)\n");
   2317 					sc->sc_flags |= WM_F_SGMII;
   2318 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2319 					break;
   2320 				}
   2321 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2322 				/*FALLTHROUGH*/
   2323 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2324 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2325 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2326 					if (link_mode
   2327 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2328 						sc->sc_mediatype
   2329 						    = WM_MEDIATYPE_COPPER;
   2330 						sc->sc_flags |= WM_F_SGMII;
   2331 					} else {
   2332 						sc->sc_mediatype
   2333 						    = WM_MEDIATYPE_SERDES;
   2334 						aprint_verbose_dev(sc->sc_dev,
   2335 						    "SERDES\n");
   2336 					}
   2337 					break;
   2338 				}
   2339 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2340 					aprint_verbose_dev(sc->sc_dev,
   2341 					    "SERDES\n");
   2342 
   2343 				/* Change current link mode setting */
   2344 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2345 				switch (sc->sc_mediatype) {
   2346 				case WM_MEDIATYPE_COPPER:
   2347 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2348 					break;
   2349 				case WM_MEDIATYPE_SERDES:
   2350 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2351 					break;
   2352 				default:
   2353 					break;
   2354 				}
   2355 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2356 				break;
   2357 			case CTRL_EXT_LINK_MODE_GMII:
   2358 			default:
   2359 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2360 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2361 				break;
   2362 			}
   2363 
    2365 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2366 				reg |= CTRL_EXT_I2C_ENA;
    2367 			else
    2368 				reg &= ~CTRL_EXT_I2C_ENA;
   2369 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2370 
   2371 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2372 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2373 			else
   2374 				wm_tbi_mediainit(sc);
   2375 			break;
   2376 		default:
   2377 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2378 				aprint_error_dev(sc->sc_dev,
   2379 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2380 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2381 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2382 		}
   2383 	}
   2384 
   2385 	ifp = &sc->sc_ethercom.ec_if;
   2386 	xname = device_xname(sc->sc_dev);
   2387 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2388 	ifp->if_softc = sc;
   2389 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2390 	ifp->if_ioctl = wm_ioctl;
   2391 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2392 		ifp->if_start = wm_nq_start;
   2393 	else
   2394 		ifp->if_start = wm_start;
   2395 	ifp->if_watchdog = wm_watchdog;
   2396 	ifp->if_init = wm_init;
   2397 	ifp->if_stop = wm_stop;
   2398 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2399 	IFQ_SET_READY(&ifp->if_snd);
   2400 
   2401 	/* Check for jumbo frame */
   2402 	switch (sc->sc_type) {
   2403 	case WM_T_82573:
   2404 		/* XXX limited to 9234 if ASPM is disabled */
   2405 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2406 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2407 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2408 		break;
   2409 	case WM_T_82571:
   2410 	case WM_T_82572:
   2411 	case WM_T_82574:
   2412 	case WM_T_82575:
   2413 	case WM_T_82576:
   2414 	case WM_T_82580:
   2415 	case WM_T_I350:
   2416 	case WM_T_I354: /* XXXX ok? */
   2417 	case WM_T_I210:
   2418 	case WM_T_I211:
   2419 	case WM_T_80003:
   2420 	case WM_T_ICH9:
   2421 	case WM_T_ICH10:
   2422 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2423 	case WM_T_PCH_LPT:
   2424 	case WM_T_PCH_SPT:
   2425 		/* XXX limited to 9234 */
   2426 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2427 		break;
   2428 	case WM_T_PCH:
   2429 		/* XXX limited to 4096 */
   2430 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2431 		break;
   2432 	case WM_T_82542_2_0:
   2433 	case WM_T_82542_2_1:
   2434 	case WM_T_82583:
   2435 	case WM_T_ICH8:
   2436 		/* No support for jumbo frame */
   2437 		break;
   2438 	default:
   2439 		/* ETHER_MAX_LEN_JUMBO */
   2440 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2441 		break;
   2442 	}
   2443 
    2444 	/* If we're an i82543 or greater, we can support VLANs. */
   2445 	if (sc->sc_type >= WM_T_82543)
   2446 		sc->sc_ethercom.ec_capabilities |=
   2447 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2448 
   2449 	/*
    2450 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2451 	 * on i82543 and later.
   2452 	 */
   2453 	if (sc->sc_type >= WM_T_82543) {
   2454 		ifp->if_capabilities |=
   2455 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2456 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2457 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2458 		    IFCAP_CSUM_TCPv6_Tx |
   2459 		    IFCAP_CSUM_UDPv6_Tx;
   2460 	}
   2461 
   2462 	/*
    2463 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2464 	 *
   2465 	 *	82541GI (8086:1076) ... no
   2466 	 *	82572EI (8086:10b9) ... yes
   2467 	 */
   2468 	if (sc->sc_type >= WM_T_82571) {
   2469 		ifp->if_capabilities |=
   2470 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2471 	}
   2472 
   2473 	/*
    2474 	 * If we're an i82544 or greater (except i82547), we can do
   2475 	 * TCP segmentation offload.
   2476 	 */
   2477 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2478 		ifp->if_capabilities |= IFCAP_TSOv4;
   2479 	}
   2480 
   2481 	if (sc->sc_type >= WM_T_82571) {
   2482 		ifp->if_capabilities |= IFCAP_TSOv6;
   2483 	}
   2484 
   2485 #ifdef WM_MPSAFE
   2486 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2487 #else
   2488 	sc->sc_core_lock = NULL;
   2489 #endif
   2490 
   2491 	/* Attach the interface. */
   2492 	if_initialize(ifp);
   2493 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2494 	ether_ifattach(ifp, enaddr);
   2495 	if_register(ifp);
   2496 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2497 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2498 			  RND_FLAG_DEFAULT);
   2499 
   2500 #ifdef WM_EVENT_COUNTERS
   2501 	/* Attach event counters. */
   2502 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2503 	    NULL, xname, "txsstall");
   2504 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2505 	    NULL, xname, "txdstall");
   2506 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2507 	    NULL, xname, "txfifo_stall");
   2508 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2509 	    NULL, xname, "txdw");
   2510 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2511 	    NULL, xname, "txqe");
   2512 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2513 	    NULL, xname, "rxintr");
   2514 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2515 	    NULL, xname, "linkintr");
   2516 
   2517 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2518 	    NULL, xname, "rxipsum");
   2519 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2520 	    NULL, xname, "rxtusum");
   2521 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2522 	    NULL, xname, "txipsum");
   2523 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2524 	    NULL, xname, "txtusum");
   2525 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2526 	    NULL, xname, "txtusum6");
   2527 
   2528 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2529 	    NULL, xname, "txtso");
   2530 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2531 	    NULL, xname, "txtso6");
   2532 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2533 	    NULL, xname, "txtsopain");
   2534 
   2535 	for (i = 0; i < WM_NTXSEGS; i++) {
   2536 		snprintf(wm_txseg_evcnt_names[i],
   2537 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2538 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2539 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2540 	}
   2541 
   2542 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2543 	    NULL, xname, "txdrop");
   2544 
   2545 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2546 	    NULL, xname, "tu");
   2547 
   2548 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2549 	    NULL, xname, "tx_xoff");
   2550 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2551 	    NULL, xname, "tx_xon");
   2552 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2553 	    NULL, xname, "rx_xoff");
   2554 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2555 	    NULL, xname, "rx_xon");
   2556 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2557 	    NULL, xname, "rx_macctl");
   2558 #endif /* WM_EVENT_COUNTERS */
   2559 
   2560 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2561 		pmf_class_network_register(self, ifp);
   2562 	else
   2563 		aprint_error_dev(self, "couldn't establish power handler\n");
   2564 
   2565 	sc->sc_flags |= WM_F_ATTACHED;
   2566  out:
   2567 	return;
   2568 }
   2569 
   2570 /* The detach function (ca_detach) */
   2571 static int
   2572 wm_detach(device_t self, int flags __unused)
   2573 {
   2574 	struct wm_softc *sc = device_private(self);
   2575 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2576 	int i;
   2577 #ifndef WM_MPSAFE
   2578 	int s;
   2579 #endif
   2580 
   2581 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2582 		return 0;
   2583 
   2584 #ifndef WM_MPSAFE
   2585 	s = splnet();
   2586 #endif
    2587 	/* Stop the interface.  Callouts are stopped in wm_stop(). */
   2588 	wm_stop(ifp, 1);
   2589 
   2590 #ifndef WM_MPSAFE
   2591 	splx(s);
   2592 #endif
   2593 
   2594 	pmf_device_deregister(self);
   2595 
   2596 	/* Tell the firmware about the release */
   2597 	WM_CORE_LOCK(sc);
   2598 	wm_release_manageability(sc);
   2599 	wm_release_hw_control(sc);
   2600 	WM_CORE_UNLOCK(sc);
   2601 
   2602 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2603 
   2604 	/* Delete all remaining media. */
   2605 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2606 
   2607 	ether_ifdetach(ifp);
   2608 	if_detach(ifp);
   2609 	if_percpuq_destroy(sc->sc_ipq);
   2610 
   2611 	/* Unload RX dmamaps and free mbufs */
   2612 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   2613 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   2614 		WM_RX_LOCK(rxq);
   2615 		wm_rxdrain(rxq);
   2616 		WM_RX_UNLOCK(rxq);
   2617 	}
   2618 	/* Must unlock here */
   2619 
    2620 	/* Disestablish the interrupt handlers */
   2621 	for (i = 0; i < sc->sc_nintrs; i++) {
   2622 		if (sc->sc_ihs[i] != NULL) {
   2623 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2624 			sc->sc_ihs[i] = NULL;
   2625 		}
   2626 	}
   2627 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2628 
   2629 	wm_free_txrx_queues(sc);
   2630 
   2631 	/* Unmap the registers */
   2632 	if (sc->sc_ss) {
   2633 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2634 		sc->sc_ss = 0;
   2635 	}
   2636 	if (sc->sc_ios) {
   2637 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2638 		sc->sc_ios = 0;
   2639 	}
   2640 	if (sc->sc_flashs) {
   2641 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2642 		sc->sc_flashs = 0;
   2643 	}
   2644 
   2645 	if (sc->sc_core_lock)
   2646 		mutex_obj_free(sc->sc_core_lock);
   2647 
   2648 	return 0;
   2649 }
   2650 
   2651 static bool
   2652 wm_suspend(device_t self, const pmf_qual_t *qual)
   2653 {
   2654 	struct wm_softc *sc = device_private(self);
   2655 
   2656 	wm_release_manageability(sc);
   2657 	wm_release_hw_control(sc);
   2658 #ifdef WM_WOL
   2659 	wm_enable_wakeup(sc);
   2660 #endif
   2661 
   2662 	return true;
   2663 }
   2664 
   2665 static bool
   2666 wm_resume(device_t self, const pmf_qual_t *qual)
   2667 {
   2668 	struct wm_softc *sc = device_private(self);
   2669 
   2670 	wm_init_manageability(sc);
   2671 
   2672 	return true;
   2673 }
   2674 
   2675 /*
   2676  * wm_watchdog:		[ifnet interface function]
   2677  *
   2678  *	Watchdog timer handler.
   2679  */
   2680 static void
   2681 wm_watchdog(struct ifnet *ifp)
   2682 {
   2683 	struct wm_softc *sc = ifp->if_softc;
   2684 	struct wm_txqueue *txq = &sc->sc_txq[0];
   2685 
   2686 	/*
   2687 	 * Since we're using delayed interrupts, sweep up
   2688 	 * before we report an error.
   2689 	 */
   2690 	WM_TX_LOCK(txq);
   2691 	wm_txeof(sc);
   2692 	WM_TX_UNLOCK(txq);
   2693 
   2694 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2695 #ifdef WM_DEBUG
   2696 		int i, j;
   2697 		struct wm_txsoft *txs;
   2698 #endif
   2699 		log(LOG_ERR,
   2700 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2701 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2702 		    txq->txq_next);
   2703 		ifp->if_oerrors++;
   2704 #ifdef WM_DEBUG
    2705 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2706 		    i = WM_NEXTTXS(txq, i)) {
    2707 			txs = &txq->txq_soft[i];
    2708 			printf("txs %d tx %d -> %d\n",
    2709 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2710 			for (j = txs->txs_firstdesc; ;
    2711 			    j = WM_NEXTTX(txq, j)) {
    2712 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2713 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2714 				printf("\t %#08x%08x\n",
    2715 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2716 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2717 				if (j == txs->txs_lastdesc)
    2718 					break;
    2719 			}
    2720 		}
   2721 #endif
   2722 		/* Reset the interface. */
   2723 		(void) wm_init(ifp);
   2724 	}
   2725 
   2726 	/* Try to get more packets going. */
   2727 	ifp->if_start(ifp);
   2728 }
   2729 
   2730 /*
   2731  * wm_tick:
   2732  *
   2733  *	One second timer, used to check link status, sweep up
   2734  *	completed transmit jobs, etc.
   2735  */
   2736 static void
   2737 wm_tick(void *arg)
   2738 {
   2739 	struct wm_softc *sc = arg;
   2740 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2741 #ifndef WM_MPSAFE
   2742 	int s;
   2743 
   2744 	s = splnet();
   2745 #endif
   2746 
   2747 	WM_CORE_LOCK(sc);
   2748 
   2749 	if (sc->sc_stopping)
   2750 		goto out;
   2751 
   2752 	if (sc->sc_type >= WM_T_82542_2_1) {
   2753 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2754 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2755 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2756 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2757 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2758 	}
   2759 
   2760 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2761 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2762 	    + CSR_READ(sc, WMREG_CRCERRS)
   2763 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2764 	    + CSR_READ(sc, WMREG_SYMERRC)
   2765 	    + CSR_READ(sc, WMREG_RXERRC)
   2766 	    + CSR_READ(sc, WMREG_SEC)
   2767 	    + CSR_READ(sc, WMREG_CEXTERR)
   2768 	    + CSR_READ(sc, WMREG_RLEC);
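         	/*
         	 * Starting the sum above with 0ULL promotes each 32-bit counter
         	 * read to 64 bits before it is accumulated (the in-line "ensure
         	 * quad_t" note), so the intermediate additions cannot wrap in
         	 * 32 bits.
         	 */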
   2769 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2770 
   2771 	if (sc->sc_flags & WM_F_HAS_MII)
   2772 		mii_tick(&sc->sc_mii);
   2773 	else if ((sc->sc_type >= WM_T_82575)
   2774 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2775 		wm_serdes_tick(sc);
   2776 	else
   2777 		wm_tbi_tick(sc);
   2778 
   2779 out:
   2780 	WM_CORE_UNLOCK(sc);
   2781 #ifndef WM_MPSAFE
   2782 	splx(s);
   2783 #endif
   2784 
   2785 	if (!sc->sc_stopping)
   2786 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2787 }
   2788 
   2789 static int
   2790 wm_ifflags_cb(struct ethercom *ec)
   2791 {
   2792 	struct ifnet *ifp = &ec->ec_if;
   2793 	struct wm_softc *sc = ifp->if_softc;
   2794 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2795 	int rc = 0;
   2796 
   2797 	WM_CORE_LOCK(sc);
   2798 
   2799 	if (change != 0)
   2800 		sc->sc_if_flags = ifp->if_flags;
   2801 
   2802 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2803 		rc = ENETRESET;
   2804 		goto out;
   2805 	}
   2806 
   2807 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2808 		wm_set_filter(sc);
   2809 
   2810 	wm_set_vlan(sc);
   2811 
   2812 out:
   2813 	WM_CORE_UNLOCK(sc);
   2814 
   2815 	return rc;
   2816 }
   2817 
   2818 /*
   2819  * wm_ioctl:		[ifnet interface function]
   2820  *
   2821  *	Handle control requests from the operator.
   2822  */
   2823 static int
   2824 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2825 {
   2826 	struct wm_softc *sc = ifp->if_softc;
   2827 	struct ifreq *ifr = (struct ifreq *) data;
   2828 	struct ifaddr *ifa = (struct ifaddr *)data;
   2829 	struct sockaddr_dl *sdl;
   2830 	int s, error;
   2831 
   2832 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2833 		device_xname(sc->sc_dev), __func__));
   2834 #ifndef WM_MPSAFE
   2835 	s = splnet();
   2836 #endif
   2837 	switch (cmd) {
   2838 	case SIOCSIFMEDIA:
   2839 	case SIOCGIFMEDIA:
   2840 		WM_CORE_LOCK(sc);
   2841 		/* Flow control requires full-duplex mode. */
   2842 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2843 		    (ifr->ifr_media & IFM_FDX) == 0)
   2844 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2845 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2846 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2847 				/* We can do both TXPAUSE and RXPAUSE. */
   2848 				ifr->ifr_media |=
   2849 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2850 			}
   2851 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2852 		}
   2853 		WM_CORE_UNLOCK(sc);
   2854 #ifdef WM_MPSAFE
   2855 		s = splnet();
   2856 #endif
   2857 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2858 #ifdef WM_MPSAFE
   2859 		splx(s);
   2860 #endif
   2861 		break;
   2862 	case SIOCINITIFADDR:
   2863 		WM_CORE_LOCK(sc);
   2864 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2865 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2866 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2867 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2868 			/* unicast address is first multicast entry */
   2869 			wm_set_filter(sc);
   2870 			error = 0;
   2871 			WM_CORE_UNLOCK(sc);
   2872 			break;
   2873 		}
   2874 		WM_CORE_UNLOCK(sc);
   2875 		/*FALLTHROUGH*/
   2876 	default:
   2877 #ifdef WM_MPSAFE
   2878 		s = splnet();
   2879 #endif
   2880 		/* It may call wm_start, so unlock here */
   2881 		error = ether_ioctl(ifp, cmd, data);
   2882 #ifdef WM_MPSAFE
   2883 		splx(s);
   2884 #endif
   2885 		if (error != ENETRESET)
   2886 			break;
   2887 
   2888 		error = 0;
   2889 
   2890 		if (cmd == SIOCSIFCAP) {
   2891 			error = (*ifp->if_init)(ifp);
   2892 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2893 			;
   2894 		else if (ifp->if_flags & IFF_RUNNING) {
   2895 			/*
   2896 			 * Multicast list has changed; set the hardware filter
   2897 			 * accordingly.
   2898 			 */
   2899 			WM_CORE_LOCK(sc);
   2900 			wm_set_filter(sc);
   2901 			WM_CORE_UNLOCK(sc);
   2902 		}
   2903 		break;
   2904 	}
   2905 
   2906 #ifndef WM_MPSAFE
   2907 	splx(s);
   2908 #endif
   2909 	return error;
   2910 }
   2911 
   2912 /* MAC address related */
   2913 
   2914 /*
    2915  * Get the offset of the MAC address and return it.
    2916  * If an error occurs, offset 0 is used.
   2917  */
   2918 static uint16_t
   2919 wm_check_alt_mac_addr(struct wm_softc *sc)
   2920 {
   2921 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2922 	uint16_t offset = NVM_OFF_MACADDR;
   2923 
   2924 	/* Try to read alternative MAC address pointer */
   2925 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2926 		return 0;
   2927 
    2928 	/* Check whether the pointer is valid. */
   2929 	if ((offset == 0x0000) || (offset == 0xffff))
   2930 		return 0;
   2931 
   2932 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2933 	/*
    2934 	 * Check whether the alternative MAC address is valid.
    2935 	 * Some cards have a non-0xffff pointer but don't actually use an
    2936 	 * alternative MAC address.
    2937 	 *
    2938 	 * Check whether the multicast (group) bit is set.
   2939 	 */
   2940 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2941 		if (((myea[0] & 0xff) & 0x01) == 0)
   2942 			return offset; /* Found */
   2943 
   2944 	/* Not found */
   2945 	return 0;
   2946 }
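
/*
 * Illustrative example of the check above (hypothetical NVM contents):
 * if the word read at "offset" were 0x5501, its low byte 0x01 has the
 * multicast (group) bit set, so the entry is rejected and offset 0 is
 * used; a word such as 0x5500 would be accepted.
 */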
   2947 
   2948 static int
   2949 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2950 {
   2951 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2952 	uint16_t offset = NVM_OFF_MACADDR;
   2953 	int do_invert = 0;
   2954 
   2955 	switch (sc->sc_type) {
   2956 	case WM_T_82580:
   2957 	case WM_T_I350:
   2958 	case WM_T_I354:
   2959 		/* EEPROM Top Level Partitioning */
   2960 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2961 		break;
   2962 	case WM_T_82571:
   2963 	case WM_T_82575:
   2964 	case WM_T_82576:
   2965 	case WM_T_80003:
   2966 	case WM_T_I210:
   2967 	case WM_T_I211:
   2968 		offset = wm_check_alt_mac_addr(sc);
   2969 		if (offset == 0)
   2970 			if ((sc->sc_funcid & 0x01) == 1)
   2971 				do_invert = 1;
   2972 		break;
   2973 	default:
   2974 		if ((sc->sc_funcid & 0x01) == 1)
   2975 			do_invert = 1;
   2976 		break;
   2977 	}
   2978 
   2979 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2980 		myea) != 0)
   2981 		goto bad;
   2982 
   2983 	enaddr[0] = myea[0] & 0xff;
   2984 	enaddr[1] = myea[0] >> 8;
   2985 	enaddr[2] = myea[1] & 0xff;
   2986 	enaddr[3] = myea[1] >> 8;
   2987 	enaddr[4] = myea[2] & 0xff;
   2988 	enaddr[5] = myea[2] >> 8;
   2989 
   2990 	/*
   2991 	 * Toggle the LSB of the MAC address on the second port
   2992 	 * of some dual port cards.
   2993 	 */
   2994 	if (do_invert != 0)
   2995 		enaddr[5] ^= 1;
   2996 
   2997 	return 0;
   2998 
   2999  bad:
   3000 	return -1;
   3001 }
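
/*
 * Worked example of the unpacking above (hypothetical NVM contents): the
 * words {0x1100, 0x3322, 0x5544} produce the station address
 * 00:11:22:33:44:55, since the low byte of each 16-bit word comes first.
 * With do_invert set, the last byte would become 0x54.
 */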
   3002 
   3003 /*
   3004  * wm_set_ral:
   3005  *
    3006  *	Set an entry in the receive address list.
   3007  */
   3008 static void
   3009 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3010 {
   3011 	uint32_t ral_lo, ral_hi;
   3012 
   3013 	if (enaddr != NULL) {
   3014 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3015 		    (enaddr[3] << 24);
   3016 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3017 		ral_hi |= RAL_AV;
   3018 	} else {
   3019 		ral_lo = 0;
   3020 		ral_hi = 0;
   3021 	}
   3022 
   3023 	if (sc->sc_type >= WM_T_82544) {
   3024 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3025 		    ral_lo);
   3026 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3027 		    ral_hi);
   3028 	} else {
   3029 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3030 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3031 	}
   3032 }
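
/*
 * For example (hypothetical address): 00:11:22:33:44:55 packs into
 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV; the address-valid
 * bit is what makes the hardware match against the entry at all.
 */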
   3033 
   3034 /*
   3035  * wm_mchash:
   3036  *
   3037  *	Compute the hash of the multicast address for the 4096-bit
   3038  *	multicast filter.
   3039  */
   3040 static uint32_t
   3041 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3042 {
   3043 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3044 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3045 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3046 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3047 	uint32_t hash;
   3048 
   3049 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3050 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3051 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3052 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3053 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3054 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3055 		return (hash & 0x3ff);
   3056 	}
   3057 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3058 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3059 
   3060 	return (hash & 0xfff);
   3061 }
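
/*
 * Worked example (hypothetical input): for the multicast address
 * 01:00:5e:7f:ff:fa with sc_mchash_type == 0 on a non-ICH/PCH chip,
 * hash = (0xff >> 4) | (0xfa << 4) = 0xfaf.  wm_set_filter() then uses
 * MTA register index (0xfaf >> 5) & 0x7f = 0x7d and bit 0xfaf & 0x1f = 15.
 */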
   3062 
   3063 /*
   3064  * wm_set_filter:
   3065  *
   3066  *	Set up the receive filter.
   3067  */
   3068 static void
   3069 wm_set_filter(struct wm_softc *sc)
   3070 {
   3071 	struct ethercom *ec = &sc->sc_ethercom;
   3072 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3073 	struct ether_multi *enm;
   3074 	struct ether_multistep step;
   3075 	bus_addr_t mta_reg;
   3076 	uint32_t hash, reg, bit;
   3077 	int i, size, ralmax;
   3078 
   3079 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3080 		device_xname(sc->sc_dev), __func__));
   3081 	if (sc->sc_type >= WM_T_82544)
   3082 		mta_reg = WMREG_CORDOVA_MTA;
   3083 	else
   3084 		mta_reg = WMREG_MTA;
   3085 
   3086 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3087 
   3088 	if (ifp->if_flags & IFF_BROADCAST)
   3089 		sc->sc_rctl |= RCTL_BAM;
   3090 	if (ifp->if_flags & IFF_PROMISC) {
   3091 		sc->sc_rctl |= RCTL_UPE;
   3092 		goto allmulti;
   3093 	}
   3094 
   3095 	/*
   3096 	 * Set the station address in the first RAL slot, and
   3097 	 * clear the remaining slots.
   3098 	 */
   3099 	if (sc->sc_type == WM_T_ICH8)
    3100 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3101 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3102 	    || (sc->sc_type == WM_T_PCH))
   3103 		size = WM_RAL_TABSIZE_ICH8;
   3104 	else if (sc->sc_type == WM_T_PCH2)
   3105 		size = WM_RAL_TABSIZE_PCH2;
    3106 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3107 		size = WM_RAL_TABSIZE_PCH_LPT;
   3108 	else if (sc->sc_type == WM_T_82575)
   3109 		size = WM_RAL_TABSIZE_82575;
   3110 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3111 		size = WM_RAL_TABSIZE_82576;
   3112 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3113 		size = WM_RAL_TABSIZE_I350;
   3114 	else
   3115 		size = WM_RAL_TABSIZE;
   3116 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3117 
   3118 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3119 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3120 		switch (i) {
   3121 		case 0:
   3122 			/* We can use all entries */
   3123 			ralmax = size;
   3124 			break;
   3125 		case 1:
   3126 			/* Only RAR[0] */
   3127 			ralmax = 1;
   3128 			break;
   3129 		default:
    3130 			/* Available SHRA entries plus RAR[0] */
   3131 			ralmax = i + 1;
   3132 		}
   3133 	} else
   3134 		ralmax = size;
   3135 	for (i = 1; i < size; i++) {
   3136 		if (i < ralmax)
   3137 			wm_set_ral(sc, NULL, i);
   3138 	}
   3139 
   3140 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3141 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3142 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3143 	    || (sc->sc_type == WM_T_PCH_SPT))
   3144 		size = WM_ICH8_MC_TABSIZE;
   3145 	else
   3146 		size = WM_MC_TABSIZE;
   3147 	/* Clear out the multicast table. */
   3148 	for (i = 0; i < size; i++)
   3149 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3150 
   3151 	ETHER_FIRST_MULTI(step, ec, enm);
   3152 	while (enm != NULL) {
   3153 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3154 			/*
   3155 			 * We must listen to a range of multicast addresses.
   3156 			 * For now, just accept all multicasts, rather than
   3157 			 * trying to set only those filter bits needed to match
   3158 			 * the range.  (At this time, the only use of address
   3159 			 * ranges is for IP multicast routing, for which the
   3160 			 * range is big enough to require all bits set.)
   3161 			 */
   3162 			goto allmulti;
   3163 		}
   3164 
   3165 		hash = wm_mchash(sc, enm->enm_addrlo);
   3166 
   3167 		reg = (hash >> 5);
   3168 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3169 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3170 		    || (sc->sc_type == WM_T_PCH2)
   3171 		    || (sc->sc_type == WM_T_PCH_LPT)
   3172 		    || (sc->sc_type == WM_T_PCH_SPT))
   3173 			reg &= 0x1f;
   3174 		else
   3175 			reg &= 0x7f;
   3176 		bit = hash & 0x1f;
   3177 
   3178 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3179 		hash |= 1U << bit;
   3180 
   3181 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3182 			/*
    3183 			 * 82544 Errata 9: Certain registers cannot be written
   3184 			 * with particular alignments in PCI-X bus operation
   3185 			 * (FCAH, MTA and VFTA).
   3186 			 */
   3187 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3188 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3189 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3190 		} else
   3191 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3192 
   3193 		ETHER_NEXT_MULTI(step, enm);
   3194 	}
   3195 
   3196 	ifp->if_flags &= ~IFF_ALLMULTI;
   3197 	goto setit;
   3198 
   3199  allmulti:
   3200 	ifp->if_flags |= IFF_ALLMULTI;
   3201 	sc->sc_rctl |= RCTL_MPE;
   3202 
   3203  setit:
   3204 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3205 }
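
/*
 * To illustrate the decision flow above: with IFF_PROMISC set, the
 * function jumps straight to "allmulti", so RCTL ends up with
 * RCTL_UPE | RCTL_MPE (plus RCTL_BAM when IFF_BROADCAST is set).  With a
 * plain list of multicast memberships, only the per-address MTA bits are
 * set and neither UPE nor MPE is enabled.
 */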
   3206 
   3207 /* Reset and init related */
   3208 
   3209 static void
   3210 wm_set_vlan(struct wm_softc *sc)
   3211 {
   3212 
   3213 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3214 		device_xname(sc->sc_dev), __func__));
   3215 	/* Deal with VLAN enables. */
   3216 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3217 		sc->sc_ctrl |= CTRL_VME;
   3218 	else
   3219 		sc->sc_ctrl &= ~CTRL_VME;
   3220 
   3221 	/* Write the control registers. */
   3222 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3223 }
   3224 
   3225 static void
   3226 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3227 {
   3228 	uint32_t gcr;
   3229 	pcireg_t ctrl2;
   3230 
   3231 	gcr = CSR_READ(sc, WMREG_GCR);
   3232 
   3233 	/* Only take action if timeout value is defaulted to 0 */
   3234 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3235 		goto out;
   3236 
   3237 	if ((gcr & GCR_CAP_VER2) == 0) {
   3238 		gcr |= GCR_CMPL_TMOUT_10MS;
   3239 		goto out;
   3240 	}
   3241 
   3242 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3243 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3244 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3245 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3246 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3247 
   3248 out:
   3249 	/* Disable completion timeout resend */
   3250 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3251 
   3252 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3253 }
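
/*
 * In short: a zeroed GCR completion-timeout field is replaced either by
 * the 10ms value in GCR itself (pre-v2 capability) or, on v2-capable
 * devices, by a 16ms timeout programmed through PCIE_DCSR2; in every
 * case completion timeout resend is disabled before GCR is written back.
 */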
   3254 
   3255 void
   3256 wm_get_auto_rd_done(struct wm_softc *sc)
   3257 {
   3258 	int i;
   3259 
   3260 	/* wait for eeprom to reload */
   3261 	switch (sc->sc_type) {
   3262 	case WM_T_82571:
   3263 	case WM_T_82572:
   3264 	case WM_T_82573:
   3265 	case WM_T_82574:
   3266 	case WM_T_82583:
   3267 	case WM_T_82575:
   3268 	case WM_T_82576:
   3269 	case WM_T_82580:
   3270 	case WM_T_I350:
   3271 	case WM_T_I354:
   3272 	case WM_T_I210:
   3273 	case WM_T_I211:
   3274 	case WM_T_80003:
   3275 	case WM_T_ICH8:
   3276 	case WM_T_ICH9:
   3277 		for (i = 0; i < 10; i++) {
   3278 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3279 				break;
   3280 			delay(1000);
   3281 		}
   3282 		if (i == 10) {
   3283 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3284 			    "complete\n", device_xname(sc->sc_dev));
   3285 		}
   3286 		break;
   3287 	default:
   3288 		break;
   3289 	}
   3290 }
   3291 
   3292 void
   3293 wm_lan_init_done(struct wm_softc *sc)
   3294 {
   3295 	uint32_t reg = 0;
   3296 	int i;
   3297 
   3298 	/* wait for eeprom to reload */
   3299 	switch (sc->sc_type) {
   3300 	case WM_T_ICH10:
   3301 	case WM_T_PCH:
   3302 	case WM_T_PCH2:
   3303 	case WM_T_PCH_LPT:
   3304 	case WM_T_PCH_SPT:
   3305 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3306 			reg = CSR_READ(sc, WMREG_STATUS);
   3307 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3308 				break;
   3309 			delay(100);
   3310 		}
   3311 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3312 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3313 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3314 		}
   3315 		break;
   3316 	default:
   3317 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3318 		    __func__);
   3319 		break;
   3320 	}
   3321 
   3322 	reg &= ~STATUS_LAN_INIT_DONE;
   3323 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3324 }
   3325 
   3326 void
   3327 wm_get_cfg_done(struct wm_softc *sc)
   3328 {
   3329 	int mask;
   3330 	uint32_t reg;
   3331 	int i;
   3332 
   3333 	/* wait for eeprom to reload */
   3334 	switch (sc->sc_type) {
   3335 	case WM_T_82542_2_0:
   3336 	case WM_T_82542_2_1:
   3337 		/* null */
   3338 		break;
   3339 	case WM_T_82543:
   3340 	case WM_T_82544:
   3341 	case WM_T_82540:
   3342 	case WM_T_82545:
   3343 	case WM_T_82545_3:
   3344 	case WM_T_82546:
   3345 	case WM_T_82546_3:
   3346 	case WM_T_82541:
   3347 	case WM_T_82541_2:
   3348 	case WM_T_82547:
   3349 	case WM_T_82547_2:
   3350 	case WM_T_82573:
   3351 	case WM_T_82574:
   3352 	case WM_T_82583:
   3353 		/* generic */
   3354 		delay(10*1000);
   3355 		break;
   3356 	case WM_T_80003:
   3357 	case WM_T_82571:
   3358 	case WM_T_82572:
   3359 	case WM_T_82575:
   3360 	case WM_T_82576:
   3361 	case WM_T_82580:
   3362 	case WM_T_I350:
   3363 	case WM_T_I354:
   3364 	case WM_T_I210:
   3365 	case WM_T_I211:
   3366 		if (sc->sc_type == WM_T_82571) {
   3367 			/* Only 82571 shares port 0 */
   3368 			mask = EEMNGCTL_CFGDONE_0;
   3369 		} else
   3370 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3371 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3372 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3373 				break;
   3374 			delay(1000);
   3375 		}
   3376 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3377 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3378 				device_xname(sc->sc_dev), __func__));
   3379 		}
   3380 		break;
   3381 	case WM_T_ICH8:
   3382 	case WM_T_ICH9:
   3383 	case WM_T_ICH10:
   3384 	case WM_T_PCH:
   3385 	case WM_T_PCH2:
   3386 	case WM_T_PCH_LPT:
   3387 	case WM_T_PCH_SPT:
   3388 		delay(10*1000);
   3389 		if (sc->sc_type >= WM_T_ICH10)
   3390 			wm_lan_init_done(sc);
   3391 		else
   3392 			wm_get_auto_rd_done(sc);
   3393 
   3394 		reg = CSR_READ(sc, WMREG_STATUS);
   3395 		if ((reg & STATUS_PHYRA) != 0)
   3396 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3397 		break;
   3398 	default:
   3399 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3400 		    __func__);
   3401 		break;
   3402 	}
   3403 }
   3404 
   3405 /* Init hardware bits */
   3406 void
   3407 wm_initialize_hardware_bits(struct wm_softc *sc)
   3408 {
   3409 	uint32_t tarc0, tarc1, reg;
   3410 
   3411 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3412 		device_xname(sc->sc_dev), __func__));
   3413 	/* For 82571 variant, 80003 and ICHs */
   3414 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3415 	    || (sc->sc_type >= WM_T_80003)) {
   3416 
   3417 		/* Transmit Descriptor Control 0 */
   3418 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3419 		reg |= TXDCTL_COUNT_DESC;
   3420 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3421 
   3422 		/* Transmit Descriptor Control 1 */
   3423 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3424 		reg |= TXDCTL_COUNT_DESC;
   3425 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3426 
   3427 		/* TARC0 */
   3428 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3429 		switch (sc->sc_type) {
   3430 		case WM_T_82571:
   3431 		case WM_T_82572:
   3432 		case WM_T_82573:
   3433 		case WM_T_82574:
   3434 		case WM_T_82583:
   3435 		case WM_T_80003:
   3436 			/* Clear bits 30..27 */
   3437 			tarc0 &= ~__BITS(30, 27);
   3438 			break;
   3439 		default:
   3440 			break;
   3441 		}
   3442 
   3443 		switch (sc->sc_type) {
   3444 		case WM_T_82571:
   3445 		case WM_T_82572:
   3446 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3447 
   3448 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3449 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3450 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3451 			/* 8257[12] Errata No.7 */
    3452 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3453 
   3454 			/* TARC1 bit 28 */
   3455 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3456 				tarc1 &= ~__BIT(28);
   3457 			else
   3458 				tarc1 |= __BIT(28);
   3459 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3460 
   3461 			/*
   3462 			 * 8257[12] Errata No.13
    3463 			 * Disable Dynamic Clock Gating.
   3464 			 */
   3465 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3466 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3467 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3468 			break;
   3469 		case WM_T_82573:
   3470 		case WM_T_82574:
   3471 		case WM_T_82583:
   3472 			if ((sc->sc_type == WM_T_82574)
   3473 			    || (sc->sc_type == WM_T_82583))
   3474 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3475 
   3476 			/* Extended Device Control */
   3477 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3478 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3479 			reg |= __BIT(22);	/* Set bit 22 */
   3480 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3481 
   3482 			/* Device Control */
   3483 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3484 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3485 
   3486 			/* PCIe Control Register */
   3487 			/*
   3488 			 * 82573 Errata (unknown).
   3489 			 *
   3490 			 * 82574 Errata 25 and 82583 Errata 12
   3491 			 * "Dropped Rx Packets":
    3492 			 *   NVM image version 2.1.4 and newer doesn't have this bug.
   3493 			 */
   3494 			reg = CSR_READ(sc, WMREG_GCR);
   3495 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3496 			CSR_WRITE(sc, WMREG_GCR, reg);
   3497 
   3498 			if ((sc->sc_type == WM_T_82574)
   3499 			    || (sc->sc_type == WM_T_82583)) {
   3500 				/*
   3501 				 * Document says this bit must be set for
   3502 				 * proper operation.
   3503 				 */
   3504 				reg = CSR_READ(sc, WMREG_GCR);
   3505 				reg |= __BIT(22);
   3506 				CSR_WRITE(sc, WMREG_GCR, reg);
   3507 
   3508 				/*
    3509 				 * Apply a workaround for the hardware errata
    3510 				 * documented in the errata docs.  It fixes an
    3511 				 * issue where some error-prone or unreliable
    3512 				 * PCIe completions occur, particularly with
    3513 				 * ASPM enabled.  Without the fix, the issue
    3514 				 * can cause Tx timeouts.
   3515 				 */
   3516 				reg = CSR_READ(sc, WMREG_GCR2);
   3517 				reg |= __BIT(0);
   3518 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3519 			}
   3520 			break;
   3521 		case WM_T_80003:
   3522 			/* TARC0 */
   3523 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3524 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3525 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3526 
   3527 			/* TARC1 bit 28 */
   3528 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3529 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3530 				tarc1 &= ~__BIT(28);
   3531 			else
   3532 				tarc1 |= __BIT(28);
   3533 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3534 			break;
   3535 		case WM_T_ICH8:
   3536 		case WM_T_ICH9:
   3537 		case WM_T_ICH10:
   3538 		case WM_T_PCH:
   3539 		case WM_T_PCH2:
   3540 		case WM_T_PCH_LPT:
   3541 		case WM_T_PCH_SPT:
   3542 			/* TARC0 */
   3543 			if ((sc->sc_type == WM_T_ICH8)
   3544 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3545 				/* Set TARC0 bits 29 and 28 */
   3546 				tarc0 |= __BITS(29, 28);
   3547 			}
   3548 			/* Set TARC0 bits 23,24,26,27 */
   3549 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3550 
   3551 			/* CTRL_EXT */
   3552 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3553 			reg |= __BIT(22);	/* Set bit 22 */
   3554 			/*
   3555 			 * Enable PHY low-power state when MAC is at D3
   3556 			 * w/o WoL
   3557 			 */
   3558 			if (sc->sc_type >= WM_T_PCH)
   3559 				reg |= CTRL_EXT_PHYPDEN;
   3560 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3561 
   3562 			/* TARC1 */
   3563 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3564 			/* bit 28 */
   3565 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3566 				tarc1 &= ~__BIT(28);
   3567 			else
   3568 				tarc1 |= __BIT(28);
   3569 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3570 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3571 
   3572 			/* Device Status */
   3573 			if (sc->sc_type == WM_T_ICH8) {
   3574 				reg = CSR_READ(sc, WMREG_STATUS);
   3575 				reg &= ~__BIT(31);
   3576 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3577 
   3578 			}
   3579 
   3580 			/* IOSFPC */
   3581 			if (sc->sc_type == WM_T_PCH_SPT) {
   3582 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3583 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3584 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3585 			}
   3586 			/*
    3587 			 * To work around a descriptor data corruption issue
    3588 			 * seen with NFS v2 UDP traffic, just disable the NFS
    3589 			 * filtering capability.
   3590 			 */
   3591 			reg = CSR_READ(sc, WMREG_RFCTL);
   3592 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3593 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3594 			break;
   3595 		default:
   3596 			break;
   3597 		}
   3598 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3599 
   3600 		/*
   3601 		 * 8257[12] Errata No.52 and some others.
   3602 		 * Avoid RSS Hash Value bug.
   3603 		 */
   3604 		switch (sc->sc_type) {
   3605 		case WM_T_82571:
   3606 		case WM_T_82572:
   3607 		case WM_T_82573:
   3608 		case WM_T_80003:
   3609 		case WM_T_ICH8:
   3610 			reg = CSR_READ(sc, WMREG_RFCTL);
    3611 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3612 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3613 			break;
   3614 		default:
   3615 			break;
   3616 		}
   3617 	}
   3618 }
   3619 
   3620 static uint32_t
   3621 wm_rxpbs_adjust_82580(uint32_t val)
   3622 {
   3623 	uint32_t rv = 0;
   3624 
   3625 	if (val < __arraycount(wm_82580_rxpbs_table))
   3626 		rv = wm_82580_rxpbs_table[val];
   3627 
   3628 	return rv;
   3629 }
   3630 
   3631 /*
   3632  * wm_reset:
   3633  *
   3634  *	Reset the i82542 chip.
   3635  */
   3636 static void
   3637 wm_reset(struct wm_softc *sc)
   3638 {
   3639 	int phy_reset = 0;
   3640 	int i, error = 0;
   3641 	uint32_t reg, mask;
   3642 
   3643 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3644 		device_xname(sc->sc_dev), __func__));
   3645 	/*
   3646 	 * Allocate on-chip memory according to the MTU size.
   3647 	 * The Packet Buffer Allocation register must be written
   3648 	 * before the chip is reset.
   3649 	 */
   3650 	switch (sc->sc_type) {
   3651 	case WM_T_82547:
   3652 	case WM_T_82547_2:
   3653 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3654 		    PBA_22K : PBA_30K;
   3655 		for (i = 0; i < sc->sc_ntxqueues; i++) {
   3656 			struct wm_txqueue *txq = &sc->sc_txq[i];
   3657 			txq->txq_fifo_head = 0;
   3658 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3659 			txq->txq_fifo_size =
   3660 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3661 			txq->txq_fifo_stall = 0;
   3662 		}
   3663 		break;
   3664 	case WM_T_82571:
   3665 	case WM_T_82572:
    3666 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3667 	case WM_T_80003:
   3668 		sc->sc_pba = PBA_32K;
   3669 		break;
   3670 	case WM_T_82573:
   3671 		sc->sc_pba = PBA_12K;
   3672 		break;
   3673 	case WM_T_82574:
   3674 	case WM_T_82583:
   3675 		sc->sc_pba = PBA_20K;
   3676 		break;
   3677 	case WM_T_82576:
   3678 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3679 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3680 		break;
   3681 	case WM_T_82580:
   3682 	case WM_T_I350:
   3683 	case WM_T_I354:
   3684 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3685 		break;
   3686 	case WM_T_I210:
   3687 	case WM_T_I211:
   3688 		sc->sc_pba = PBA_34K;
   3689 		break;
   3690 	case WM_T_ICH8:
   3691 		/* Workaround for a bit corruption issue in FIFO memory */
   3692 		sc->sc_pba = PBA_8K;
   3693 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3694 		break;
   3695 	case WM_T_ICH9:
   3696 	case WM_T_ICH10:
   3697 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3698 		    PBA_14K : PBA_10K;
   3699 		break;
   3700 	case WM_T_PCH:
   3701 	case WM_T_PCH2:
   3702 	case WM_T_PCH_LPT:
   3703 	case WM_T_PCH_SPT:
   3704 		sc->sc_pba = PBA_26K;
   3705 		break;
   3706 	default:
   3707 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3708 		    PBA_40K : PBA_48K;
   3709 		break;
   3710 	}
   3711 	/*
    3712 	 * Only old or non-multiqueue devices have the PBA register.
   3713 	 * XXX Need special handling for 82575.
   3714 	 */
   3715 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3716 	    || (sc->sc_type == WM_T_82575))
   3717 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3718 
   3719 	/* Prevent the PCI-E bus from sticking */
   3720 	if (sc->sc_flags & WM_F_PCIE) {
   3721 		int timeout = 800;
   3722 
   3723 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3724 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3725 
   3726 		while (timeout--) {
   3727 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3728 			    == 0)
   3729 				break;
   3730 			delay(100);
   3731 		}
   3732 	}
   3733 
   3734 	/* Set the completion timeout for interface */
   3735 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3736 	    || (sc->sc_type == WM_T_82580)
   3737 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3738 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3739 		wm_set_pcie_completion_timeout(sc);
   3740 
   3741 	/* Clear interrupt */
   3742 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3743 	if (sc->sc_nintrs > 1) {
   3744 		if (sc->sc_type != WM_T_82574) {
   3745 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3746 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3747 		} else {
   3748 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3749 		}
   3750 	}
   3751 
   3752 	/* Stop the transmit and receive processes. */
   3753 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3754 	sc->sc_rctl &= ~RCTL_EN;
   3755 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3756 	CSR_WRITE_FLUSH(sc);
   3757 
   3758 	/* XXX set_tbi_sbp_82543() */
   3759 
   3760 	delay(10*1000);
   3761 
   3762 	/* Must acquire the MDIO ownership before MAC reset */
   3763 	switch (sc->sc_type) {
   3764 	case WM_T_82573:
   3765 	case WM_T_82574:
   3766 	case WM_T_82583:
   3767 		error = wm_get_hw_semaphore_82573(sc);
   3768 		break;
   3769 	default:
   3770 		break;
   3771 	}
   3772 
   3773 	/*
   3774 	 * 82541 Errata 29? & 82547 Errata 28?
   3775 	 * See also the description about PHY_RST bit in CTRL register
   3776 	 * in 8254x_GBe_SDM.pdf.
   3777 	 */
   3778 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3779 		CSR_WRITE(sc, WMREG_CTRL,
   3780 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3781 		CSR_WRITE_FLUSH(sc);
   3782 		delay(5000);
   3783 	}
   3784 
   3785 	switch (sc->sc_type) {
   3786 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3787 	case WM_T_82541:
   3788 	case WM_T_82541_2:
   3789 	case WM_T_82547:
   3790 	case WM_T_82547_2:
   3791 		/*
   3792 		 * On some chipsets, a reset through a memory-mapped write
   3793 		 * cycle can cause the chip to reset before completing the
   3794 		 * write cycle.  This causes major headache that can be
   3795 		 * avoided by issuing the reset via indirect register writes
   3796 		 * through I/O space.
   3797 		 *
   3798 		 * So, if we successfully mapped the I/O BAR at attach time,
   3799 		 * use that.  Otherwise, try our luck with a memory-mapped
   3800 		 * reset.
   3801 		 */
   3802 		if (sc->sc_flags & WM_F_IOH_VALID)
   3803 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3804 		else
   3805 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3806 		break;
   3807 	case WM_T_82545_3:
   3808 	case WM_T_82546_3:
   3809 		/* Use the shadow control register on these chips. */
   3810 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3811 		break;
   3812 	case WM_T_80003:
   3813 		mask = swfwphysem[sc->sc_funcid];
   3814 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3815 		wm_get_swfw_semaphore(sc, mask);
   3816 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3817 		wm_put_swfw_semaphore(sc, mask);
   3818 		break;
   3819 	case WM_T_ICH8:
   3820 	case WM_T_ICH9:
   3821 	case WM_T_ICH10:
   3822 	case WM_T_PCH:
   3823 	case WM_T_PCH2:
   3824 	case WM_T_PCH_LPT:
   3825 	case WM_T_PCH_SPT:
   3826 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3827 		if (wm_phy_resetisblocked(sc) == false) {
   3828 			/*
   3829 			 * Gate automatic PHY configuration by hardware on
   3830 			 * non-managed 82579
   3831 			 */
   3832 			if ((sc->sc_type == WM_T_PCH2)
   3833 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3834 				== 0))
   3835 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3836 
   3837 			reg |= CTRL_PHY_RESET;
   3838 			phy_reset = 1;
   3839 		} else
   3840 			printf("XXX reset is blocked!!!\n");
   3841 		wm_get_swfwhw_semaphore(sc);
   3842 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3843 		/* Don't insert a completion barrier during reset */
   3844 		delay(20*1000);
   3845 		wm_put_swfwhw_semaphore(sc);
   3846 		break;
   3847 	case WM_T_82580:
   3848 	case WM_T_I350:
   3849 	case WM_T_I354:
   3850 	case WM_T_I210:
   3851 	case WM_T_I211:
   3852 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3853 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3854 			CSR_WRITE_FLUSH(sc);
   3855 		delay(5000);
   3856 		break;
   3857 	case WM_T_82542_2_0:
   3858 	case WM_T_82542_2_1:
   3859 	case WM_T_82543:
   3860 	case WM_T_82540:
   3861 	case WM_T_82545:
   3862 	case WM_T_82546:
   3863 	case WM_T_82571:
   3864 	case WM_T_82572:
   3865 	case WM_T_82573:
   3866 	case WM_T_82574:
   3867 	case WM_T_82575:
   3868 	case WM_T_82576:
   3869 	case WM_T_82583:
   3870 	default:
   3871 		/* Everything else can safely use the documented method. */
   3872 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3873 		break;
   3874 	}
   3875 
   3876 	/* Must release the MDIO ownership after MAC reset */
   3877 	switch (sc->sc_type) {
   3878 	case WM_T_82573:
   3879 	case WM_T_82574:
   3880 	case WM_T_82583:
   3881 		if (error == 0)
   3882 			wm_put_hw_semaphore_82573(sc);
   3883 		break;
   3884 	default:
   3885 		break;
   3886 	}
   3887 
   3888 	if (phy_reset != 0)
   3889 		wm_get_cfg_done(sc);
   3890 
   3891 	/* reload EEPROM */
   3892 	switch (sc->sc_type) {
   3893 	case WM_T_82542_2_0:
   3894 	case WM_T_82542_2_1:
   3895 	case WM_T_82543:
   3896 	case WM_T_82544:
   3897 		delay(10);
   3898 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3899 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3900 		CSR_WRITE_FLUSH(sc);
   3901 		delay(2000);
   3902 		break;
   3903 	case WM_T_82540:
   3904 	case WM_T_82545:
   3905 	case WM_T_82545_3:
   3906 	case WM_T_82546:
   3907 	case WM_T_82546_3:
   3908 		delay(5*1000);
   3909 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3910 		break;
   3911 	case WM_T_82541:
   3912 	case WM_T_82541_2:
   3913 	case WM_T_82547:
   3914 	case WM_T_82547_2:
   3915 		delay(20000);
   3916 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3917 		break;
   3918 	case WM_T_82571:
   3919 	case WM_T_82572:
   3920 	case WM_T_82573:
   3921 	case WM_T_82574:
   3922 	case WM_T_82583:
   3923 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3924 			delay(10);
   3925 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3926 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3927 			CSR_WRITE_FLUSH(sc);
   3928 		}
   3929 		/* check EECD_EE_AUTORD */
   3930 		wm_get_auto_rd_done(sc);
   3931 		/*
    3932 		 * PHY configuration from the NVM starts only after
    3933 		 * EECD_AUTO_RD is set.
   3934 		 */
   3935 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3936 		    || (sc->sc_type == WM_T_82583))
   3937 			delay(25*1000);
   3938 		break;
   3939 	case WM_T_82575:
   3940 	case WM_T_82576:
   3941 	case WM_T_82580:
   3942 	case WM_T_I350:
   3943 	case WM_T_I354:
   3944 	case WM_T_I210:
   3945 	case WM_T_I211:
   3946 	case WM_T_80003:
   3947 		/* check EECD_EE_AUTORD */
   3948 		wm_get_auto_rd_done(sc);
   3949 		break;
   3950 	case WM_T_ICH8:
   3951 	case WM_T_ICH9:
   3952 	case WM_T_ICH10:
   3953 	case WM_T_PCH:
   3954 	case WM_T_PCH2:
   3955 	case WM_T_PCH_LPT:
   3956 	case WM_T_PCH_SPT:
   3957 		break;
   3958 	default:
   3959 		panic("%s: unknown type\n", __func__);
   3960 	}
   3961 
   3962 	/* Check whether EEPROM is present or not */
   3963 	switch (sc->sc_type) {
   3964 	case WM_T_82575:
   3965 	case WM_T_82576:
   3966 	case WM_T_82580:
   3967 	case WM_T_I350:
   3968 	case WM_T_I354:
   3969 	case WM_T_ICH8:
   3970 	case WM_T_ICH9:
   3971 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3972 			/* Not found */
   3973 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3974 			if (sc->sc_type == WM_T_82575)
   3975 				wm_reset_init_script_82575(sc);
   3976 		}
   3977 		break;
   3978 	default:
   3979 		break;
   3980 	}
   3981 
   3982 	if ((sc->sc_type == WM_T_82580)
   3983 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3984 		/* clear global device reset status bit */
   3985 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3986 	}
   3987 
   3988 	/* Clear any pending interrupt events. */
   3989 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3990 	reg = CSR_READ(sc, WMREG_ICR);
   3991 	if (sc->sc_nintrs > 1) {
   3992 		if (sc->sc_type != WM_T_82574) {
   3993 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3994 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3995 		} else
   3996 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3997 	}
   3998 
   3999 	/* reload sc_ctrl */
   4000 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4001 
   4002 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4003 		wm_set_eee_i350(sc);
   4004 
   4005 	/* dummy read from WUC */
   4006 	if (sc->sc_type == WM_T_PCH)
   4007 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4008 	/*
   4009 	 * For PCH, this write will make sure that any noise will be detected
   4010 	 * as a CRC error and be dropped rather than show up as a bad packet
   4011 	 * to the DMA engine
   4012 	 */
   4013 	if (sc->sc_type == WM_T_PCH)
   4014 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4015 
   4016 	if (sc->sc_type >= WM_T_82544)
   4017 		CSR_WRITE(sc, WMREG_WUC, 0);
   4018 
   4019 	wm_reset_mdicnfg_82580(sc);
   4020 
   4021 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4022 		wm_pll_workaround_i210(sc);
   4023 }
   4024 
   4025 /*
   4026  * wm_add_rxbuf:
   4027  *
    4028  *	Add a receive buffer to the indicated descriptor.
   4029  */
   4030 static int
   4031 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4032 {
   4033 	struct wm_softc *sc = rxq->rxq_sc;
   4034 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4035 	struct mbuf *m;
   4036 	int error;
   4037 
   4038 	KASSERT(WM_RX_LOCKED(rxq));
   4039 
   4040 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4041 	if (m == NULL)
   4042 		return ENOBUFS;
   4043 
   4044 	MCLGET(m, M_DONTWAIT);
   4045 	if ((m->m_flags & M_EXT) == 0) {
   4046 		m_freem(m);
   4047 		return ENOBUFS;
   4048 	}
   4049 
   4050 	if (rxs->rxs_mbuf != NULL)
   4051 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4052 
   4053 	rxs->rxs_mbuf = m;
   4054 
   4055 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4056 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4057 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4058 	if (error) {
   4059 		/* XXX XXX XXX */
   4060 		aprint_error_dev(sc->sc_dev,
   4061 		    "unable to load rx DMA map %d, error = %d\n",
   4062 		    idx, error);
   4063 		panic("wm_add_rxbuf");
   4064 	}
   4065 
   4066 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4067 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4068 
   4069 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4070 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4071 			wm_init_rxdesc(rxq, idx);
   4072 	} else
   4073 		wm_init_rxdesc(rxq, idx);
   4074 
   4075 	return 0;
   4076 }
   4077 
   4078 /*
   4079  * wm_rxdrain:
   4080  *
   4081  *	Drain the receive queue.
   4082  */
   4083 static void
   4084 wm_rxdrain(struct wm_rxqueue *rxq)
   4085 {
   4086 	struct wm_softc *sc = rxq->rxq_sc;
   4087 	struct wm_rxsoft *rxs;
   4088 	int i;
   4089 
   4090 	KASSERT(WM_RX_LOCKED(rxq));
   4091 
   4092 	for (i = 0; i < WM_NRXDESC; i++) {
   4093 		rxs = &rxq->rxq_soft[i];
   4094 		if (rxs->rxs_mbuf != NULL) {
   4095 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4096 			m_freem(rxs->rxs_mbuf);
   4097 			rxs->rxs_mbuf = NULL;
   4098 		}
   4099 	}
   4100 }
   4101 
   4102 
   4103 /*
   4104  * XXX copy from FreeBSD's sys/net/rss_config.c
   4105  */
   4106 /*
   4107  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4108  * effectiveness may be limited by algorithm choice and available entropy
   4109  * during the boot.
   4110  *
   4111  * XXXRW: And that we don't randomize it yet!
   4112  *
   4113  * This is the default Microsoft RSS specification key which is also
   4114  * the Chelsio T5 firmware default key.
   4115  */
   4116 #define RSS_KEYSIZE 40
   4117 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4118 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4119 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4120 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4121 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4122 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4123 };
   4124 
   4125 /*
   4126  * Caller must pass an array of size sizeof(rss_key).
   4127  *
   4128  * XXX
    4129  * Since if_ixgbe may use this function, it should not be an
    4130  * if_wm-specific function.
   4131  */
   4132 static void
   4133 wm_rss_getkey(uint8_t *key)
   4134 {
   4135 
   4136 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4137 }
   4138 
   4139 /*
   4140  * Setup registers for RSS.
   4141  *
   4142  * XXX not yet VMDq support
   4143  */
   4144 static void
   4145 wm_init_rss(struct wm_softc *sc)
   4146 {
   4147 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4148 	int i;
   4149 
   4150 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4151 
   4152 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4153 		int qid, reta_ent;
   4154 
   4155 		qid  = i % sc->sc_nrxqueues;
    4156 		switch (sc->sc_type) {
   4157 		case WM_T_82574:
   4158 			reta_ent = __SHIFTIN(qid,
   4159 			    RETA_ENT_QINDEX_MASK_82574);
   4160 			break;
   4161 		case WM_T_82575:
   4162 			reta_ent = __SHIFTIN(qid,
   4163 			    RETA_ENT_QINDEX1_MASK_82575);
   4164 			break;
   4165 		default:
   4166 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4167 			break;
   4168 		}
   4169 
   4170 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4171 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4172 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4173 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4174 	}
   4175 
   4176 	wm_rss_getkey((uint8_t *)rss_key);
   4177 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4178 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4179 
   4180 	if (sc->sc_type == WM_T_82574)
   4181 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4182 	else
   4183 		mrqc = MRQC_ENABLE_RSS_MQ;
   4184 
   4185 	/* XXXX
   4186 	 * The same as FreeBSD igb.
    4187 	 * Why isn't MRQC_RSS_FIELD_IPV6_EX used?
   4188 	 */
   4189 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4190 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4191 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4192 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4193 
   4194 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4195 }
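
/*
 * A sketch of the resulting steering, assuming two RX queues: the loop
 * above fills each RETA entry with qid = i % sc_nrxqueues, i.e. queue ids
 * 0,1,0,1,..., so packets whose RSS hash selects an even RETA index land
 * on queue 0 and odd indexes land on queue 1.
 */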
   4196 
   4197 /*
    4198  * Adjust the TX and RX queue numbers which the system actually uses.
    4199  *
    4200  * The numbers are affected by the parameters below.
    4201  *     - The number of hardware queues
   4202  *     - The number of MSI-X vectors (= "nvectors" argument)
   4203  *     - ncpu
   4204  */
   4205 static void
   4206 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4207 {
   4208 	int hw_ntxqueues, hw_nrxqueues;
   4209 
   4210 	if (nvectors < 3) {
   4211 		sc->sc_ntxqueues = 1;
   4212 		sc->sc_nrxqueues = 1;
   4213 		return;
   4214 	}
   4215 
    4216 	switch (sc->sc_type) {
   4217 	case WM_T_82572:
   4218 		hw_ntxqueues = 2;
   4219 		hw_nrxqueues = 2;
   4220 		break;
   4221 	case WM_T_82574:
   4222 		hw_ntxqueues = 2;
   4223 		hw_nrxqueues = 2;
   4224 		break;
   4225 	case WM_T_82575:
   4226 		hw_ntxqueues = 4;
   4227 		hw_nrxqueues = 4;
   4228 		break;
   4229 	case WM_T_82576:
   4230 		hw_ntxqueues = 16;
   4231 		hw_nrxqueues = 16;
   4232 		break;
   4233 	case WM_T_82580:
   4234 	case WM_T_I350:
   4235 	case WM_T_I354:
   4236 		hw_ntxqueues = 8;
   4237 		hw_nrxqueues = 8;
   4238 		break;
   4239 	case WM_T_I210:
   4240 		hw_ntxqueues = 4;
   4241 		hw_nrxqueues = 4;
   4242 		break;
   4243 	case WM_T_I211:
   4244 		hw_ntxqueues = 2;
   4245 		hw_nrxqueues = 2;
   4246 		break;
   4247 		/*
    4248 		 * As the Ethernet controllers below do not support MSI-X,
    4249 		 * this driver does not use multiqueue for them.
   4250 		 *     - WM_T_80003
   4251 		 *     - WM_T_ICH8
   4252 		 *     - WM_T_ICH9
   4253 		 *     - WM_T_ICH10
   4254 		 *     - WM_T_PCH
   4255 		 *     - WM_T_PCH2
   4256 		 *     - WM_T_PCH_LPT
   4257 		 */
   4258 	default:
   4259 		hw_ntxqueues = 1;
   4260 		hw_nrxqueues = 1;
   4261 		break;
   4262 	}
   4263 
   4264 	/*
    4265 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    4266 	 * limit the number of queues actually used.
    4267 	 *
    4268 	 * XXX
    4269 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
    4270 	 * However, the number of MSI-X vectors on recent controllers (such as
    4271 	 * the I354) expects that drivers bundle a TX queue interrupt and an RX
    4272 	 * queue interrupt into one interrupt, e.g. FreeBSD's igb handles
    4273 	 * interrupts that way.
   4274 	 */
   4275 	if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
   4276 		sc->sc_ntxqueues = (nvectors - 1) / 2;
   4277 		sc->sc_nrxqueues = (nvectors - 1) / 2;
   4278 	} else {
   4279 		sc->sc_ntxqueues = hw_ntxqueues;
   4280 		sc->sc_nrxqueues = hw_nrxqueues;
   4281 	}
   4282 
   4283 	/*
    4284 	 * Since more queues than CPUs cannot improve scaling, we limit
    4285 	 * the number of queues actually used.
   4286 	 */
   4287 	if (ncpu < sc->sc_ntxqueues)
   4288 		sc->sc_ntxqueues = ncpu;
   4289 	if (ncpu < sc->sc_nrxqueues)
   4290 		sc->sc_nrxqueues = ncpu;
   4291 
   4292 	/* XXX Currently, this driver supports RX multiqueue only. */
   4293 	sc->sc_ntxqueues = 1;
   4294 }
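
/*
 * Example of the adjustment, assuming an I350 (8 hardware TX/RX queues)
 * attached with nvectors == 5 on a 4-CPU machine: since 5 < 8 + 8 + 1,
 * sc_ntxqueues = sc_nrxqueues = (5 - 1) / 2 = 2; neither exceeds ncpu,
 * and sc_ntxqueues is then forced back to 1 because TX multiqueue is not
 * supported yet.
 */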
   4295 
   4296 /*
   4297  * Both single interrupt MSI and INTx can use this function.
   4298  */
   4299 static int
   4300 wm_setup_legacy(struct wm_softc *sc)
   4301 {
   4302 	pci_chipset_tag_t pc = sc->sc_pc;
   4303 	const char *intrstr = NULL;
   4304 	char intrbuf[PCI_INTRSTR_LEN];
   4305 	int error;
   4306 
   4307 	error = wm_alloc_txrx_queues(sc);
   4308 	if (error) {
   4309 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4310 		    error);
   4311 		return ENOMEM;
   4312 	}
   4313 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4314 	    sizeof(intrbuf));
   4315 #ifdef WM_MPSAFE
   4316 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4317 #endif
   4318 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4319 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4320 	if (sc->sc_ihs[0] == NULL) {
   4321 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4322 		    (pci_intr_type(sc->sc_intrs[0])
   4323 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4324 		return ENOMEM;
   4325 	}
   4326 
   4327 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4328 	sc->sc_nintrs = 1;
   4329 	return 0;
   4330 }
   4331 
   4332 static int
   4333 wm_setup_msix(struct wm_softc *sc)
   4334 {
   4335 	void *vih;
   4336 	kcpuset_t *affinity;
   4337 	int qidx, error, intr_idx, tx_established, rx_established;
   4338 	pci_chipset_tag_t pc = sc->sc_pc;
   4339 	const char *intrstr = NULL;
   4340 	char intrbuf[PCI_INTRSTR_LEN];
   4341 	char intr_xname[INTRDEVNAMEBUF];
   4342 	/*
    4343 	 * To avoid colliding with other devices' interrupts, the affinity of
    4344 	 * Tx/Rx interrupts starts from CPU#1.
   4345 	 */
   4346 	int affinity_offset = 1;
   4347 
   4348 	error = wm_alloc_txrx_queues(sc);
   4349 	if (error) {
   4350 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4351 		    error);
   4352 		return ENOMEM;
   4353 	}
   4354 
   4355 	kcpuset_create(&affinity, false);
   4356 	intr_idx = 0;
   4357 
   4358 	/*
   4359 	 * TX
   4360 	 */
   4361 	tx_established = 0;
   4362 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4363 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4364 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
   4365 
   4366 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4367 		    sizeof(intrbuf));
   4368 #ifdef WM_MPSAFE
   4369 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4370 		    PCI_INTR_MPSAFE, true);
   4371 #endif
   4372 		memset(intr_xname, 0, sizeof(intr_xname));
   4373 		snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
   4374 		    device_xname(sc->sc_dev), qidx);
   4375 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4376 		    IPL_NET, wm_txintr_msix, txq, intr_xname);
   4377 		if (vih == NULL) {
   4378 			aprint_error_dev(sc->sc_dev,
   4379 			    "unable to establish MSI-X(for TX)%s%s\n",
   4380 			    intrstr ? " at " : "",
   4381 			    intrstr ? intrstr : "");
   4382 
   4383 			goto fail_0;
   4384 		}
   4385 		kcpuset_zero(affinity);
   4386 		/* Round-robin affinity */
   4387 		kcpuset_set(affinity, affinity_to);
   4388 		error = interrupt_distribute(vih, affinity, NULL);
   4389 		if (error == 0) {
   4390 			aprint_normal_dev(sc->sc_dev,
   4391 			    "for TX interrupting at %s affinity to %u\n",
   4392 			    intrstr, affinity_to);
   4393 		} else {
   4394 			aprint_normal_dev(sc->sc_dev,
   4395 			    "for TX interrupting at %s\n", intrstr);
   4396 		}
   4397 		sc->sc_ihs[intr_idx] = vih;
   4398 		txq->txq_id = qidx;
   4399 		txq->txq_intr_idx = intr_idx;
   4400 
   4401 		tx_established++;
   4402 		intr_idx++;
   4403 	}
   4404 
   4405 	/*
   4406 	 * RX
   4407 	 */
   4408 	rx_established = 0;
   4409 	for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4410 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4411 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
   4412 
   4413 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4414 		    sizeof(intrbuf));
   4415 #ifdef WM_MPSAFE
   4416 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4417 		    PCI_INTR_MPSAFE, true);
   4418 #endif
   4419 		memset(intr_xname, 0, sizeof(intr_xname));
   4420 		snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
   4421 		    device_xname(sc->sc_dev), qidx);
   4422 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4423 		    IPL_NET, wm_rxintr_msix, rxq, intr_xname);
   4424 		if (vih == NULL) {
   4425 			aprint_error_dev(sc->sc_dev,
   4426 			    "unable to establish MSI-X(for RX)%s%s\n",
   4427 			    intrstr ? " at " : "",
   4428 			    intrstr ? intrstr : "");
   4429 
   4430 			goto fail_1;
   4431 		}
   4432 		kcpuset_zero(affinity);
   4433 		/* Round-robin affinity */
   4434 		kcpuset_set(affinity, affinity_to);
   4435 		error = interrupt_distribute(vih, affinity, NULL);
   4436 		if (error == 0) {
   4437 			aprint_normal_dev(sc->sc_dev,
   4438 			    "for RX interrupting at %s affinity to %u\n",
   4439 			    intrstr, affinity_to);
   4440 		} else {
   4441 			aprint_normal_dev(sc->sc_dev,
   4442 			    "for RX interrupting at %s\n", intrstr);
   4443 		}
   4444 		sc->sc_ihs[intr_idx] = vih;
   4445 		rxq->rxq_id = qidx;
   4446 		rxq->rxq_intr_idx = intr_idx;
   4447 
   4448 		rx_established++;
   4449 		intr_idx++;
   4450 	}
   4451 
   4452 	/*
   4453 	 * LINK
   4454 	 */
   4455 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4456 	    sizeof(intrbuf));
   4457 #ifdef WM_MPSAFE
   4458 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4459 #endif
   4460 	memset(intr_xname, 0, sizeof(intr_xname));
   4461 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4462 	    device_xname(sc->sc_dev));
   4463 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4464 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4465 	if (vih == NULL) {
   4466 		aprint_error_dev(sc->sc_dev,
   4467 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4468 		    intrstr ? " at " : "",
   4469 		    intrstr ? intrstr : "");
   4470 
   4471 		goto fail_1;
   4472 	}
   4473 	/* keep default affinity to LINK interrupt */
   4474 	aprint_normal_dev(sc->sc_dev,
   4475 	    "for LINK interrupting at %s\n", intrstr);
   4476 	sc->sc_ihs[intr_idx] = vih;
   4477 	sc->sc_link_intr_idx = intr_idx;
   4478 
   4479 	sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   4480 	kcpuset_destroy(affinity);
   4481 	return 0;
   4482 
   4483  fail_1:
   4484 	for (qidx = 0; qidx < rx_established; qidx++) {
   4485 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4486 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[rxq->rxq_intr_idx]);
   4487 		sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
   4488 	}
   4489  fail_0:
   4490 	for (qidx = 0; qidx < tx_established; qidx++) {
   4491 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4492 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[txq->txq_intr_idx]);
   4493 		sc->sc_ihs[txq->txq_intr_idx] = NULL;
   4494 	}
   4495 
   4496 	kcpuset_destroy(affinity);
   4497 	return ENOMEM;
   4498 }
   4499 
   4500 /*
   4501  * wm_init:		[ifnet interface function]
   4502  *
   4503  *	Initialize the interface.
   4504  */
   4505 static int
   4506 wm_init(struct ifnet *ifp)
   4507 {
   4508 	struct wm_softc *sc = ifp->if_softc;
   4509 	int ret;
   4510 
   4511 	WM_CORE_LOCK(sc);
   4512 	ret = wm_init_locked(ifp);
   4513 	WM_CORE_UNLOCK(sc);
   4514 
   4515 	return ret;
   4516 }
   4517 
   4518 static int
   4519 wm_init_locked(struct ifnet *ifp)
   4520 {
   4521 	struct wm_softc *sc = ifp->if_softc;
   4522 	int i, j, trynum, error = 0;
   4523 	uint32_t reg;
   4524 
   4525 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4526 		device_xname(sc->sc_dev), __func__));
   4527 	KASSERT(WM_CORE_LOCKED(sc));
   4528 	/*
    4529 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4530 	 * There is a small but measurable benefit to avoiding the adjustment
   4531 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4532 	 * on such platforms.  One possibility is that the DMA itself is
   4533 	 * slightly more efficient if the front of the entire packet (instead
   4534 	 * of the front of the headers) is aligned.
   4535 	 *
   4536 	 * Note we must always set align_tweak to 0 if we are using
   4537 	 * jumbo frames.
   4538 	 */
   4539 #ifdef __NO_STRICT_ALIGNMENT
   4540 	sc->sc_align_tweak = 0;
   4541 #else
   4542 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4543 		sc->sc_align_tweak = 0;
   4544 	else
   4545 		sc->sc_align_tweak = 2;
   4546 #endif /* __NO_STRICT_ALIGNMENT */
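
	/*
	 * Concretely, for the default MTU of 1500: 1500 + 14 + 4 = 1518
	 * fits in a standard cluster, so align_tweak is 2; offsetting the
	 * receive buffer by two bytes places the IP header, which follows
	 * the 14-byte Ethernet header, on a 4-byte boundary.
	 */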
   4547 
   4548 	/* Cancel any pending I/O. */
   4549 	wm_stop_locked(ifp, 0);
   4550 
   4551 	/* update statistics before reset */
   4552 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4553 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4554 
   4555 	/* Reset the chip to a known state. */
   4556 	wm_reset(sc);
   4557 
   4558 	switch (sc->sc_type) {
   4559 	case WM_T_82571:
   4560 	case WM_T_82572:
   4561 	case WM_T_82573:
   4562 	case WM_T_82574:
   4563 	case WM_T_82583:
   4564 	case WM_T_80003:
   4565 	case WM_T_ICH8:
   4566 	case WM_T_ICH9:
   4567 	case WM_T_ICH10:
   4568 	case WM_T_PCH:
   4569 	case WM_T_PCH2:
   4570 	case WM_T_PCH_LPT:
   4571 	case WM_T_PCH_SPT:
   4572 		/* AMT based hardware can now take control from firmware */
   4573 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4574 			wm_get_hw_control(sc);
   4575 		break;
   4576 	default:
   4577 		break;
   4578 	}
   4579 
   4580 	/* Init hardware bits */
   4581 	wm_initialize_hardware_bits(sc);
   4582 
   4583 	/* Reset the PHY. */
   4584 	if (sc->sc_flags & WM_F_HAS_MII)
   4585 		wm_gmii_reset(sc);
   4586 
   4587 	/* Calculate (E)ITR value */
   4588 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4589 		sc->sc_itr = 450;	/* For EITR */
   4590 	} else if (sc->sc_type >= WM_T_82543) {
   4591 		/*
   4592 		 * Set up the interrupt throttling register (units of 256ns)
   4593 		 * Note that a footnote in Intel's documentation says this
   4594 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4595 		 * or 10Mbit mode.  Empirically, it appears to be the case
    4596 		 * that this is also true for the 1024ns units of the other
   4597 		 * interrupt-related timer registers -- so, really, we ought
   4598 		 * to divide this value by 4 when the link speed is low.
   4599 		 *
   4600 		 * XXX implement this division at link speed change!
   4601 		 */
   4602 
   4603 		/*
   4604 		 * For N interrupts/sec, set this value to:
   4605 		 * 1000000000 / (N * 256).  Note that we set the
   4606 		 * absolute and packet timer values to this value
   4607 		 * divided by 4 to get "simple timer" behavior.
   4608 		 */
   4609 
   4610 		sc->sc_itr = 1500;		/* 2604 ints/sec */
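		/*
		 * Sanity check on the arithmetic: 1000000000 / (1500 * 256)
		 * is about 2604, matching the comment above; the "simple
		 * timer" registers then get 1500 / 4 = 375 (in 1024ns units).
		 */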
   4611 	}
   4612 
   4613 	error = wm_init_txrx_queues(sc);
   4614 	if (error)
   4615 		goto out;
   4616 
   4617 	/*
   4618 	 * Clear out the VLAN table -- we don't use it (yet).
   4619 	 */
   4620 	CSR_WRITE(sc, WMREG_VET, 0);
   4621 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4622 		trynum = 10; /* Due to hw errata */
   4623 	else
   4624 		trynum = 1;
   4625 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4626 		for (j = 0; j < trynum; j++)
   4627 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4628 
   4629 	/*
   4630 	 * Set up flow-control parameters.
   4631 	 *
   4632 	 * XXX Values could probably stand some tuning.
   4633 	 */
   4634 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4635 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4636 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4637 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4638 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4639 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4640 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4641 	}
   4642 
   4643 	sc->sc_fcrtl = FCRTL_DFLT;
   4644 	if (sc->sc_type < WM_T_82543) {
   4645 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4646 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4647 	} else {
   4648 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4649 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4650 	}
   4651 
   4652 	if (sc->sc_type == WM_T_80003)
   4653 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4654 	else
   4655 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4656 
   4657 	/* Writes the control register. */
   4658 	wm_set_vlan(sc);
   4659 
   4660 	if (sc->sc_flags & WM_F_HAS_MII) {
   4661 		int val;
   4662 
   4663 		switch (sc->sc_type) {
   4664 		case WM_T_80003:
   4665 		case WM_T_ICH8:
   4666 		case WM_T_ICH9:
   4667 		case WM_T_ICH10:
   4668 		case WM_T_PCH:
   4669 		case WM_T_PCH2:
   4670 		case WM_T_PCH_LPT:
   4671 		case WM_T_PCH_SPT:
   4672 			/*
   4673 			 * Set the mac to wait the maximum time between each
   4674 			 * iteration and increase the max iterations when
   4675 			 * polling the phy; this fixes erroneous timeouts at
   4676 			 * 10Mbps.
   4677 			 */
   4678 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4679 			    0xFFFF);
   4680 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4681 			val |= 0x3F;
   4682 			wm_kmrn_writereg(sc,
   4683 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4684 			break;
   4685 		default:
   4686 			break;
   4687 		}
   4688 
   4689 		if (sc->sc_type == WM_T_80003) {
   4690 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4691 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4692 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4693 
   4694 			/* Bypass RX and TX FIFO's */
   4695 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4696 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4697 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4698 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4699 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4700 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4701 		}
   4702 	}
   4703 #if 0
   4704 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4705 #endif
   4706 
   4707 	/* Set up checksum offload parameters. */
   4708 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4709 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4710 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4711 		reg |= RXCSUM_IPOFL;
   4712 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4713 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4714 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4715 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4716 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4717 
   4718 	/* Set up MSI-X */
   4719 	if (sc->sc_nintrs > 1) {
   4720 		uint32_t ivar;
   4721 		struct wm_txqueue *txq;
   4722 		struct wm_rxqueue *rxq;
   4723 		int qid;
   4724 
   4725 		if (sc->sc_type == WM_T_82575) {
   4726 			/* Interrupt control */
   4727 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4728 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4729 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4730 
   4731 			/* TX */
   4732 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4733 				txq = &sc->sc_txq[i];
   4734 				CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
   4735 				    EITR_TX_QUEUE(txq->txq_id));
   4736 			}
   4737 			/* RX */
   4738 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4739 				rxq = &sc->sc_rxq[i];
   4740 				CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
   4741 				    EITR_RX_QUEUE(rxq->rxq_id));
   4742 			}
   4743 			/* Link status */
   4744 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4745 			    EITR_OTHER);
   4746 		} else if (sc->sc_type == WM_T_82574) {
   4747 			/* Interrupt control */
   4748 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4749 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4750 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4751 
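         			/*
         			 * The 82574 packs one small vector field per
         			 * interrupt cause into the single IVAR register;
         			 * IVAR_INT_ON_ALL_WB, written below, appears to
         			 * request an interrupt on every descriptor
         			 * write-back (see if_wmreg.h for the exact layout).
         			 */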
   4752 			ivar = 0;
   4753 			/* TX */
   4754 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4755 				txq = &sc->sc_txq[i];
   4756 				ivar |= __SHIFTIN((IVAR_VALID_82574
   4757 					| txq->txq_intr_idx),
   4758 				    IVAR_TX_MASK_Q_82574(txq->txq_id));
   4759 			}
   4760 			/* RX */
   4761 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4762 				rxq = &sc->sc_rxq[i];
   4763 				ivar |= __SHIFTIN((IVAR_VALID_82574
   4764 					| rxq->rxq_intr_idx),
   4765 				    IVAR_RX_MASK_Q_82574(rxq->rxq_id));
   4766 			}
   4767 			/* Link status */
   4768 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4769 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4770 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4771 		} else {
   4772 			/* Interrupt control */
   4773 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4774 			    | GPIE_EIAME | GPIE_PBA);
   4775 
   4776 			switch (sc->sc_type) {
   4777 			case WM_T_82580:
   4778 			case WM_T_I350:
   4779 			case WM_T_I354:
   4780 			case WM_T_I210:
   4781 			case WM_T_I211:
   4782 				/* TX */
   4783 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4784 					txq = &sc->sc_txq[i];
   4785 					qid = txq->txq_id;
   4786 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4787 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4788 					ivar |= __SHIFTIN((txq->txq_intr_idx
   4789 						| IVAR_VALID),
   4790 					    IVAR_TX_MASK_Q(qid));
   4791 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4792 				}
   4793 
   4794 				/* RX */
   4795 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4796 					rxq = &sc->sc_rxq[i];
   4797 					qid = rxq->rxq_id;
   4798 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4799 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4800 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
   4801 						| IVAR_VALID),
   4802 					    IVAR_RX_MASK_Q(qid));
   4803 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4804 				}
   4805 				break;
   4806 			case WM_T_82576:
   4807 				/* TX */
   4808 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4809 					txq = &sc->sc_txq[i];
   4810 					qid = txq->txq_id;
   4811 					ivar = CSR_READ(sc,
   4812 					    WMREG_IVAR_Q_82576(qid));
   4813 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4814 					ivar |= __SHIFTIN((txq->txq_intr_idx
   4815 						| IVAR_VALID),
   4816 					    IVAR_TX_MASK_Q_82576(qid));
   4817 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4818 					    ivar);
   4819 				}
   4820 
   4821 				/* RX */
   4822 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4823 					rxq = &sc->sc_rxq[i];
   4824 					qid = rxq->rxq_id;
   4825 					ivar = CSR_READ(sc,
   4826 					    WMREG_IVAR_Q_82576(qid));
   4827 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4828 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
   4829 						| IVAR_VALID),
   4830 					    IVAR_RX_MASK_Q_82576(qid));
   4831 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4832 					    ivar);
   4833 				}
   4834 				break;
   4835 			default:
   4836 				break;
   4837 			}
   4838 
   4839 			/* Link status */
   4840 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4841 			    IVAR_MISC_OTHER);
   4842 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4843 		}
   4844 
   4845 		if (sc->sc_nrxqueues > 1) {
   4846 			wm_init_rss(sc);
   4847 
    4848 			/*
    4849 			 * NOTE: Receive Full-Packet Checksum Offload
    4850 			 * is mutually exclusive with Multiqueue. However,
    4851 			 * this is not the same as TCP/IP checksums, which
    4852 			 * still work.
    4853 			 */
   4854 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4855 			reg |= RXCSUM_PCSD;
   4856 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4857 		}
   4858 	}
   4859 
   4860 	/* Set up the interrupt registers. */
   4861 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4862 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4863 	    ICR_RXO | ICR_RXT0;
   4864 	if (sc->sc_nintrs > 1) {
   4865 		uint32_t mask;
   4866 		struct wm_txqueue *txq;
   4867 		struct wm_rxqueue *rxq;
   4868 
   4869 		switch (sc->sc_type) {
   4870 		case WM_T_82574:
   4871 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4872 			    WMREG_EIAC_82574_MSIX_MASK);
   4873 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4874 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4875 			break;
   4876 		default:
   4877 			if (sc->sc_type == WM_T_82575) {
   4878 				mask = 0;
   4879 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4880 					txq = &sc->sc_txq[i];
   4881 					mask |= EITR_TX_QUEUE(txq->txq_id);
   4882 				}
   4883 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4884 					rxq = &sc->sc_rxq[i];
   4885 					mask |= EITR_RX_QUEUE(rxq->rxq_id);
   4886 				}
   4887 				mask |= EITR_OTHER;
   4888 			} else {
   4889 				mask = 0;
   4890 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4891 					txq = &sc->sc_txq[i];
   4892 					mask |= 1 << txq->txq_intr_idx;
   4893 				}
   4894 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4895 					rxq = &sc->sc_rxq[i];
   4896 					mask |= 1 << rxq->rxq_intr_idx;
   4897 				}
   4898 				mask |= 1 << sc->sc_link_intr_idx;
   4899 			}
   4900 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4901 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4902 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4903 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4904 			break;
   4905 		}
   4906 	} else
   4907 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4908 
   4909 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4910 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4911 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4912 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4913 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4914 		reg |= KABGTXD_BGSQLBIAS;
   4915 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4916 	}
   4917 
   4918 	/* Set up the inter-packet gap. */
   4919 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4920 
   4921 	if (sc->sc_type >= WM_T_82543) {
   4922 		/*
   4923 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
   4924 		 * the multi queue function with MSI-X.
   4925 		 */
   4926 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4927 			int qidx;
   4928 			for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4929 				struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4930 				CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
   4931 				    sc->sc_itr);
   4932 			}
   4933 			for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4934 				struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4935 				CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
   4936 				    sc->sc_itr);
   4937 			}
   4938 			/*
   4939 			 * Link interrupts occur much less than TX
   4940 			 * interrupts and RX interrupts. So, we don't
   4941 			 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   4942 			 * FreeBSD's if_igb.
   4943 			 */
   4944 		} else
   4945 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4946 	}
   4947 
   4948 	/* Set the VLAN ethernetype. */
   4949 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4950 
   4951 	/*
   4952 	 * Set up the transmit control register; we start out with
    4953 	 * a collision distance suitable for FDX, but update it when
   4954 	 * we resolve the media type.
   4955 	 */
   4956 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4957 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4958 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4959 	if (sc->sc_type >= WM_T_82571)
   4960 		sc->sc_tctl |= TCTL_MULR;
   4961 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4962 
   4963 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    4964 		/* Write TDT after TCTL.EN is set. See the documentation. */
   4965 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4966 	}
   4967 
   4968 	if (sc->sc_type == WM_T_80003) {
   4969 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4970 		reg &= ~TCTL_EXT_GCEX_MASK;
   4971 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4972 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4973 	}
   4974 
   4975 	/* Set the media. */
   4976 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4977 		goto out;
   4978 
   4979 	/* Configure for OS presence */
   4980 	wm_init_manageability(sc);
   4981 
   4982 	/*
   4983 	 * Set up the receive control register; we actually program
   4984 	 * the register when we set the receive filter.  Use multicast
   4985 	 * address offset type 0.
   4986 	 *
   4987 	 * Only the i82544 has the ability to strip the incoming
   4988 	 * CRC, so we don't enable that feature.
   4989 	 */
   4990 	sc->sc_mchash_type = 0;
   4991 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4992 	    | RCTL_MO(sc->sc_mchash_type);
   4993 
   4994 	/*
   4995 	 * The I350 has a bug where it always strips the CRC whether
   4996 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   4997 	 */
   4998 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4999 	    || (sc->sc_type == WM_T_I210))
   5000 		sc->sc_rctl |= RCTL_SECRC;
   5001 
   5002 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5003 	    && (ifp->if_mtu > ETHERMTU)) {
   5004 		sc->sc_rctl |= RCTL_LPE;
   5005 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5006 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5007 	}
   5008 
   5009 	if (MCLBYTES == 2048) {
   5010 		sc->sc_rctl |= RCTL_2k;
   5011 	} else {
   5012 		if (sc->sc_type >= WM_T_82543) {
   5013 			switch (MCLBYTES) {
   5014 			case 4096:
   5015 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5016 				break;
   5017 			case 8192:
   5018 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5019 				break;
   5020 			case 16384:
   5021 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5022 				break;
   5023 			default:
   5024 				panic("wm_init: MCLBYTES %d unsupported",
   5025 				    MCLBYTES);
   5026 				break;
   5027 			}
   5028 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5029 	}
   5030 
   5031 	/* Set the receive filter. */
   5032 	wm_set_filter(sc);
   5033 
   5034 	/* Enable ECC */
   5035 	switch (sc->sc_type) {
   5036 	case WM_T_82571:
   5037 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5038 		reg |= PBA_ECC_CORR_EN;
   5039 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5040 		break;
   5041 	case WM_T_PCH_LPT:
   5042 	case WM_T_PCH_SPT:
   5043 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5044 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5045 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5046 
   5047 		reg = CSR_READ(sc, WMREG_CTRL);
   5048 		reg |= CTRL_MEHE;
   5049 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5050 		break;
   5051 	default:
   5052 		break;
   5053 	}
   5054 
    5055 	/* On 82575 and later, set RDT only if RX is enabled */
   5056 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5057 		int qidx;
   5058 		for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   5059 			struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   5060 			for (i = 0; i < WM_NRXDESC; i++) {
   5061 				WM_RX_LOCK(rxq);
   5062 				wm_init_rxdesc(rxq, i);
   5063 				WM_RX_UNLOCK(rxq);
   5064 
   5065 			}
   5066 		}
   5067 	}
   5068 
   5069 	sc->sc_stopping = false;
   5070 
   5071 	/* Start the one second link check clock. */
   5072 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5073 
   5074 	/* ...all done! */
   5075 	ifp->if_flags |= IFF_RUNNING;
   5076 	ifp->if_flags &= ~IFF_OACTIVE;
   5077 
   5078  out:
   5079 	sc->sc_if_flags = ifp->if_flags;
   5080 	if (error)
   5081 		log(LOG_ERR, "%s: interface not running\n",
   5082 		    device_xname(sc->sc_dev));
   5083 	return error;
   5084 }
   5085 
   5086 /*
   5087  * wm_stop:		[ifnet interface function]
   5088  *
   5089  *	Stop transmission on the interface.
   5090  */
   5091 static void
   5092 wm_stop(struct ifnet *ifp, int disable)
   5093 {
   5094 	struct wm_softc *sc = ifp->if_softc;
   5095 
   5096 	WM_CORE_LOCK(sc);
   5097 	wm_stop_locked(ifp, disable);
   5098 	WM_CORE_UNLOCK(sc);
   5099 }
   5100 
   5101 static void
   5102 wm_stop_locked(struct ifnet *ifp, int disable)
   5103 {
   5104 	struct wm_softc *sc = ifp->if_softc;
   5105 	struct wm_txsoft *txs;
   5106 	int i, qidx;
   5107 
   5108 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5109 		device_xname(sc->sc_dev), __func__));
   5110 	KASSERT(WM_CORE_LOCKED(sc));
   5111 
   5112 	sc->sc_stopping = true;
   5113 
   5114 	/* Stop the one second clock. */
   5115 	callout_stop(&sc->sc_tick_ch);
   5116 
   5117 	/* Stop the 82547 Tx FIFO stall check timer. */
   5118 	if (sc->sc_type == WM_T_82547)
   5119 		callout_stop(&sc->sc_txfifo_ch);
   5120 
   5121 	if (sc->sc_flags & WM_F_HAS_MII) {
   5122 		/* Down the MII. */
   5123 		mii_down(&sc->sc_mii);
   5124 	} else {
   5125 #if 0
   5126 		/* Should we clear PHY's status properly? */
   5127 		wm_reset(sc);
   5128 #endif
   5129 	}
   5130 
   5131 	/* Stop the transmit and receive processes. */
   5132 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5133 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5134 	sc->sc_rctl &= ~RCTL_EN;
   5135 
   5136 	/*
   5137 	 * Clear the interrupt mask to ensure the device cannot assert its
   5138 	 * interrupt line.
   5139 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5140 	 * service any currently pending or shared interrupt.
   5141 	 */
   5142 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5143 	sc->sc_icr = 0;
   5144 	if (sc->sc_nintrs > 1) {
   5145 		if (sc->sc_type != WM_T_82574) {
   5146 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5147 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5148 		} else
   5149 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5150 	}
   5151 
   5152 	/* Release any queued transmit buffers. */
   5153 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   5154 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   5155 		WM_TX_LOCK(txq);
   5156 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5157 			txs = &txq->txq_soft[i];
   5158 			if (txs->txs_mbuf != NULL) {
    5159 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5160 				m_freem(txs->txs_mbuf);
   5161 				txs->txs_mbuf = NULL;
   5162 			}
   5163 		}
   5164 		if (sc->sc_type == WM_T_PCH_SPT) {
   5165 			pcireg_t preg;
   5166 			uint32_t reg;
   5167 			int nexttx;
   5168 
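         			/*
         			 * Some PCH_SPT (I219) units apparently require
         			 * the Tx descriptor ring to be flushed before
         			 * reset when the DESCRING_STATUS flush bit is
         			 * set; the dummy 512-byte descriptor transmitted
         			 * below drains the ring.
         			 */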
   5169 			/* First, disable MULR fix in FEXTNVM11 */
   5170 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5171 			reg |= FEXTNVM11_DIS_MULRFIX;
   5172 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5173 
   5174 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5175 			    WM_PCI_DESCRING_STATUS);
   5176 			reg = CSR_READ(sc, WMREG_TDLEN(0));
   5177 			printf("XXX RST: FLUSH = %08x, len = %u\n",
   5178 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
   5179 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
   5180 			    && (reg != 0)) {
   5181 				/* TX */
   5182 				printf("XXX need TX flush (reg = %08x)\n",
   5183 				    preg);
   5184 				wm_init_tx_descs(sc, txq);
   5185 				wm_init_tx_regs(sc, txq);
   5186 				nexttx = txq->txq_next;
   5187 				wm_set_dma_addr(
   5188 					&txq->txq_descs[nexttx].wtx_addr,
   5189 					WM_CDTXADDR(txq, nexttx));
   5190 				txq->txq_descs[nexttx].wtx_cmdlen
   5191 				    = htole32(WTX_CMD_IFCS | 512);
   5192 				wm_cdtxsync(txq, nexttx, 1,
   5193 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
   5194 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5195 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5196 				CSR_WRITE_FLUSH(sc);
   5197 				delay(250);
   5198 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5199 			}
   5200 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5201 			    WM_PCI_DESCRING_STATUS);
   5202 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5203 				/* RX */
   5204 				printf("XXX need RX flush\n");
   5205 			}
   5206 		}
   5207 		WM_TX_UNLOCK(txq);
   5208 	}
   5209 
   5210 	/* Mark the interface as down and cancel the watchdog timer. */
   5211 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5212 	ifp->if_timer = 0;
   5213 
   5214 	if (disable) {
   5215 		for (i = 0; i < sc->sc_nrxqueues; i++) {
   5216 			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5217 			WM_RX_LOCK(rxq);
   5218 			wm_rxdrain(rxq);
   5219 			WM_RX_UNLOCK(rxq);
   5220 		}
   5221 	}
   5222 
   5223 #if 0 /* notyet */
   5224 	if (sc->sc_type >= WM_T_82544)
   5225 		CSR_WRITE(sc, WMREG_WUC, 0);
   5226 #endif
   5227 }
   5228 
   5229 static void
   5230 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5231 {
   5232 	struct mbuf *m;
   5233 	int i;
   5234 
   5235 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5236 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5237 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5238 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5239 		    m->m_data, m->m_len, m->m_flags);
   5240 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5241 	    i, i == 1 ? "" : "s");
   5242 }
   5243 
   5244 /*
   5245  * wm_82547_txfifo_stall:
   5246  *
   5247  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5248  *	reset the FIFO pointers, and restart packet transmission.
   5249  */
   5250 static void
   5251 wm_82547_txfifo_stall(void *arg)
   5252 {
   5253 	struct wm_softc *sc = arg;
   5254 	struct wm_txqueue *txq = sc->sc_txq;
   5255 #ifndef WM_MPSAFE
   5256 	int s;
   5257 
   5258 	s = splnet();
   5259 #endif
   5260 	WM_TX_LOCK(txq);
   5261 
   5262 	if (sc->sc_stopping)
   5263 		goto out;
   5264 
   5265 	if (txq->txq_fifo_stall) {
   5266 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5267 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5268 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5269 			/*
   5270 			 * Packets have drained.  Stop transmitter, reset
   5271 			 * FIFO pointers, restart transmitter, and kick
   5272 			 * the packet queue.
   5273 			 */
   5274 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5275 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5276 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5277 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5278 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5279 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5280 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5281 			CSR_WRITE_FLUSH(sc);
   5282 
   5283 			txq->txq_fifo_head = 0;
   5284 			txq->txq_fifo_stall = 0;
   5285 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5286 		} else {
   5287 			/*
   5288 			 * Still waiting for packets to drain; try again in
   5289 			 * another tick.
   5290 			 */
   5291 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5292 		}
   5293 	}
   5294 
   5295 out:
   5296 	WM_TX_UNLOCK(txq);
   5297 #ifndef WM_MPSAFE
   5298 	splx(s);
   5299 #endif
   5300 }
   5301 
   5302 /*
   5303  * wm_82547_txfifo_bugchk:
   5304  *
   5305  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5306  *	prevent enqueueing a packet that would wrap around the end
    5307  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5308  *
   5309  *	We do this by checking the amount of space before the end
   5310  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5311  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5312  *	the internal FIFO pointers to the beginning, and restart
   5313  *	transmission on the interface.
   5314  */
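         /*
          * Worked example (hypothetical FIFO state): with txq_fifo_size = 0x2000
          * and txq_fifo_head = 0x1f00, space = 0x100; a 1514-byte packet rounds
          * up to len = 0x600, and since 0x600 >= WM_82547_PAD_LEN + 0x100 the
          * packet would wrap the FIFO, so we stall instead.
          */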
   5315 #define	WM_FIFO_HDR		0x10
   5316 #define	WM_82547_PAD_LEN	0x3e0
   5317 static int
   5318 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5319 {
   5320 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5321 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5322 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5323 
   5324 	/* Just return if already stalled. */
   5325 	if (txq->txq_fifo_stall)
   5326 		return 1;
   5327 
   5328 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5329 		/* Stall only occurs in half-duplex mode. */
   5330 		goto send_packet;
   5331 	}
   5332 
   5333 	if (len >= WM_82547_PAD_LEN + space) {
   5334 		txq->txq_fifo_stall = 1;
   5335 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5336 		return 1;
   5337 	}
   5338 
   5339  send_packet:
   5340 	txq->txq_fifo_head += len;
   5341 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5342 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5343 
   5344 	return 0;
   5345 }
   5346 
   5347 static int
   5348 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5349 {
   5350 	int error;
   5351 
   5352 	/*
   5353 	 * Allocate the control data structures, and create and load the
   5354 	 * DMA map for it.
   5355 	 *
   5356 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5357 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5358 	 * both sets within the same 4G segment.
   5359 	 */
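         	/*
         	 * The (bus_size_t)0x100000000ULL boundary argument to
         	 * bus_dmamem_alloc() below is what enforces this: the
         	 * allocation never crosses a 4GB boundary.
         	 */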
   5360 	if (sc->sc_type < WM_T_82544) {
   5361 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
    5362 		txq->txq_descs_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
   5363 	} else {
   5364 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5365 		txq->txq_descs_size = sizeof(txdescs_t);
   5366 	}
   5367 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5368 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5369 	else
   5370 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5371 
   5372 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_descs_size,
   5373 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5374 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5375 		aprint_error_dev(sc->sc_dev,
   5376 		    "unable to allocate TX control data, error = %d\n",
   5377 		    error);
   5378 		goto fail_0;
   5379 	}
   5380 
   5381 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5382 		    txq->txq_desc_rseg, txq->txq_descs_size,
   5383 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5384 		aprint_error_dev(sc->sc_dev,
   5385 		    "unable to map TX control data, error = %d\n", error);
   5386 		goto fail_1;
   5387 	}
   5388 
   5389 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_descs_size, 1,
   5390 		    txq->txq_descs_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5391 		aprint_error_dev(sc->sc_dev,
   5392 		    "unable to create TX control data DMA map, error = %d\n",
   5393 		    error);
   5394 		goto fail_2;
   5395 	}
   5396 
   5397 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5398 		    txq->txq_descs_u, txq->txq_descs_size, NULL, 0)) != 0) {
   5399 		aprint_error_dev(sc->sc_dev,
   5400 		    "unable to load TX control data DMA map, error = %d\n",
   5401 		    error);
   5402 		goto fail_3;
   5403 	}
   5404 
   5405 	return 0;
   5406 
   5407  fail_3:
   5408 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5409  fail_2:
   5410 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5411 	    txq->txq_descs_size);
   5412  fail_1:
   5413 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5414  fail_0:
   5415 	return error;
   5416 }
   5417 
   5418 static void
   5419 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5420 {
   5421 
   5422 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5423 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5424 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5425 	    txq->txq_descs_size);
   5426 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5427 }
   5428 
   5429 static int
   5430 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5431 {
   5432 	int error;
   5433 
   5434 	/*
   5435 	 * Allocate the control data structures, and create and load the
   5436 	 * DMA map for it.
   5437 	 *
   5438 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5439 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5440 	 * both sets within the same 4G segment.
   5441 	 */
   5442 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5443 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5444 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5445 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5446 		aprint_error_dev(sc->sc_dev,
   5447 		    "unable to allocate RX control data, error = %d\n",
   5448 		    error);
   5449 		goto fail_0;
   5450 	}
   5451 
   5452 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5453 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5454 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5455 		aprint_error_dev(sc->sc_dev,
   5456 		    "unable to map RX control data, error = %d\n", error);
   5457 		goto fail_1;
   5458 	}
   5459 
   5460 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5461 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5462 		aprint_error_dev(sc->sc_dev,
   5463 		    "unable to create RX control data DMA map, error = %d\n",
   5464 		    error);
   5465 		goto fail_2;
   5466 	}
   5467 
   5468 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5469 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5470 		aprint_error_dev(sc->sc_dev,
   5471 		    "unable to load RX control data DMA map, error = %d\n",
   5472 		    error);
   5473 		goto fail_3;
   5474 	}
   5475 
   5476 	return 0;
   5477 
   5478  fail_3:
   5479 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5480  fail_2:
   5481 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5482 	    rxq->rxq_desc_size);
   5483  fail_1:
   5484 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5485  fail_0:
   5486 	return error;
   5487 }
   5488 
   5489 static void
   5490 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5491 {
   5492 
   5493 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5494 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5495 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5496 	    rxq->rxq_desc_size);
   5497 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5498 }
   5499 
   5500 
   5501 static int
   5502 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5503 {
   5504 	int i, error;
   5505 
   5506 	/* Create the transmit buffer DMA maps. */
   5507 	WM_TXQUEUELEN(txq) =
   5508 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5509 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5510 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5511 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5512 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5513 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5514 			aprint_error_dev(sc->sc_dev,
   5515 			    "unable to create Tx DMA map %d, error = %d\n",
   5516 			    i, error);
   5517 			goto fail;
   5518 		}
   5519 	}
   5520 
   5521 	return 0;
   5522 
   5523  fail:
   5524 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5525 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5526 			bus_dmamap_destroy(sc->sc_dmat,
   5527 			    txq->txq_soft[i].txs_dmamap);
   5528 	}
   5529 	return error;
   5530 }
   5531 
   5532 static void
   5533 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5534 {
   5535 	int i;
   5536 
   5537 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5538 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5539 			bus_dmamap_destroy(sc->sc_dmat,
   5540 			    txq->txq_soft[i].txs_dmamap);
   5541 	}
   5542 }
   5543 
   5544 static int
   5545 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5546 {
   5547 	int i, error;
   5548 
   5549 	/* Create the receive buffer DMA maps. */
   5550 	for (i = 0; i < WM_NRXDESC; i++) {
   5551 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5552 			    MCLBYTES, 0, 0,
   5553 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5554 			aprint_error_dev(sc->sc_dev,
   5555 			    "unable to create Rx DMA map %d error = %d\n",
   5556 			    i, error);
   5557 			goto fail;
   5558 		}
   5559 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5560 	}
   5561 
   5562 	return 0;
   5563 
   5564  fail:
   5565 	for (i = 0; i < WM_NRXDESC; i++) {
   5566 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5567 			bus_dmamap_destroy(sc->sc_dmat,
   5568 			    rxq->rxq_soft[i].rxs_dmamap);
   5569 	}
   5570 	return error;
   5571 }
   5572 
   5573 static void
   5574 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5575 {
   5576 	int i;
   5577 
   5578 	for (i = 0; i < WM_NRXDESC; i++) {
   5579 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5580 			bus_dmamap_destroy(sc->sc_dmat,
   5581 			    rxq->rxq_soft[i].rxs_dmamap);
   5582 	}
   5583 }
   5584 
   5585 /*
   5586  * wm_alloc_quques:
    5587  * wm_alloc_txrx_queues:
    5588  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   5589 static int
   5590 wm_alloc_txrx_queues(struct wm_softc *sc)
   5591 {
   5592 	int i, error, tx_done, rx_done;
   5593 
   5594 	/*
   5595 	 * For transmission
   5596 	 */
   5597 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
   5598 	    KM_SLEEP);
   5599 	if (sc->sc_txq == NULL) {
    5600 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
   5601 		error = ENOMEM;
   5602 		goto fail_0;
   5603 	}
   5604 
   5605 	error = 0;
   5606 	tx_done = 0;
   5607 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5608 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5609 		txq->txq_sc = sc;
   5610 #ifdef WM_MPSAFE
   5611 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5612 #else
   5613 		txq->txq_lock = NULL;
   5614 #endif
   5615 		error = wm_alloc_tx_descs(sc, txq);
   5616 		if (error)
   5617 			break;
   5618 		error = wm_alloc_tx_buffer(sc, txq);
   5619 		if (error) {
   5620 			wm_free_tx_descs(sc, txq);
   5621 			break;
   5622 		}
   5623 		tx_done++;
   5624 	}
   5625 	if (error)
   5626 		goto fail_1;
   5627 
   5628 	/*
    5629 	 * For receive
   5630 	 */
   5631 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
   5632 	    KM_SLEEP);
   5633 	if (sc->sc_rxq == NULL) {
    5634 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
   5635 		error = ENOMEM;
   5636 		goto fail_1;
   5637 	}
   5638 
   5639 	error = 0;
   5640 	rx_done = 0;
   5641 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5642 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5643 		rxq->rxq_sc = sc;
   5644 #ifdef WM_MPSAFE
   5645 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5646 #else
   5647 		rxq->rxq_lock = NULL;
   5648 #endif
   5649 		error = wm_alloc_rx_descs(sc, rxq);
   5650 		if (error)
   5651 			break;
   5652 
   5653 		error = wm_alloc_rx_buffer(sc, rxq);
   5654 		if (error) {
   5655 			wm_free_rx_descs(sc, rxq);
   5656 			break;
   5657 		}
   5658 
   5659 		rx_done++;
   5660 	}
   5661 	if (error)
   5662 		goto fail_2;
   5663 
   5664 	return 0;
   5665 
   5666  fail_2:
   5667 	for (i = 0; i < rx_done; i++) {
   5668 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5669 		wm_free_rx_buffer(sc, rxq);
   5670 		wm_free_rx_descs(sc, rxq);
   5671 		if (rxq->rxq_lock)
   5672 			mutex_obj_free(rxq->rxq_lock);
   5673 	}
   5674 	kmem_free(sc->sc_rxq,
   5675 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5676  fail_1:
   5677 	for (i = 0; i < tx_done; i++) {
   5678 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5679 		wm_free_tx_buffer(sc, txq);
   5680 		wm_free_tx_descs(sc, txq);
   5681 		if (txq->txq_lock)
   5682 			mutex_obj_free(txq->txq_lock);
   5683 	}
   5684 	kmem_free(sc->sc_txq,
   5685 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5686  fail_0:
   5687 	return error;
   5688 }
   5689 
   5690 /*
    5691  * wm_free_txrx_queues:
    5692  *	Free {tx,rx} descriptors and {tx,rx} buffers
   5693  */
   5694 static void
   5695 wm_free_txrx_queues(struct wm_softc *sc)
   5696 {
   5697 	int i;
   5698 
   5699 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5700 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5701 		wm_free_rx_buffer(sc, rxq);
   5702 		wm_free_rx_descs(sc, rxq);
   5703 		if (rxq->rxq_lock)
   5704 			mutex_obj_free(rxq->rxq_lock);
   5705 	}
   5706 	kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5707 
   5708 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5709 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5710 		wm_free_tx_buffer(sc, txq);
   5711 		wm_free_tx_descs(sc, txq);
   5712 		if (txq->txq_lock)
   5713 			mutex_obj_free(txq->txq_lock);
   5714 	}
   5715 	kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5716 }
   5717 
   5718 static void
   5719 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5720 {
   5721 
   5722 	KASSERT(WM_TX_LOCKED(txq));
   5723 
   5724 	/* Initialize the transmit descriptor ring. */
   5725 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5726 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5727 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5728 	txq->txq_free = WM_NTXDESC(txq);
   5729 	txq->txq_next = 0;
   5730 }
   5731 
   5732 static void
   5733 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
   5734 {
   5735 
   5736 	KASSERT(WM_TX_LOCKED(txq));
   5737 
   5738 	if (sc->sc_type < WM_T_82543) {
   5739 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5740 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5741 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5742 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5743 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5744 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5745 	} else {
   5746 		int qid = txq->txq_id;
   5747 
   5748 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5749 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5750 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5751 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5752 
   5753 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5754 			/*
   5755 			 * Don't write TDT before TCTL.EN is set.
    5756 			 * See the documentation.
   5757 			 */
   5758 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5759 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5760 			    | TXDCTL_WTHRESH(0));
   5761 		else {
    5762 			/* TIDV is in 1024ns units vs ITR's 256ns; use ITR / 4 */
   5763 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5764 			if (sc->sc_type >= WM_T_82540) {
    5765 				/* TADV should be set the same as TIDV */
   5766 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5767 			}
   5768 
   5769 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5770 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5771 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5772 		}
   5773 	}
   5774 }
   5775 
   5776 static void
   5777 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5778 {
   5779 	int i;
   5780 
   5781 	KASSERT(WM_TX_LOCKED(txq));
   5782 
   5783 	/* Initialize the transmit job descriptors. */
   5784 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5785 		txq->txq_soft[i].txs_mbuf = NULL;
   5786 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5787 	txq->txq_snext = 0;
   5788 	txq->txq_sdirty = 0;
   5789 }
   5790 
   5791 static void
   5792 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
   5793 {
   5794 
   5795 	KASSERT(WM_TX_LOCKED(txq));
   5796 
   5797 	/*
   5798 	 * Set up some register offsets that are different between
   5799 	 * the i82542 and the i82543 and later chips.
   5800 	 */
   5801 	if (sc->sc_type < WM_T_82543)
   5802 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5803 	else
   5804 		txq->txq_tdt_reg = WMREG_TDT(txq->txq_id);
   5805 
   5806 	wm_init_tx_descs(sc, txq);
   5807 	wm_init_tx_regs(sc, txq);
   5808 	wm_init_tx_buffer(sc, txq);
   5809 }
   5810 
   5811 static void
   5812 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5813 {
   5814 
   5815 	KASSERT(WM_RX_LOCKED(rxq));
   5816 
   5817 	/*
   5818 	 * Initialize the receive descriptor and receive job
   5819 	 * descriptor rings.
   5820 	 */
   5821 	if (sc->sc_type < WM_T_82543) {
   5822 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5823 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5824 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5825 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5826 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5827 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5828 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5829 
   5830 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5831 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5832 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5833 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5834 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5835 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5836 	} else {
   5837 		int qid = rxq->rxq_id;
   5838 
   5839 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5840 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5841 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5842 
   5843 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5844 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5845 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
   5846 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5847 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
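         			/*
         			 * BSIZEPKT is in 1KB units (assuming
         			 * SRRCTL_BSIZEPKT_SHIFT is 10), so MCLBYTES = 2048
         			 * programs a field value of 2, i.e. 2KB buffers.
         			 */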
   5848 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5849 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5850 			    | RXDCTL_WTHRESH(1));
   5851 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5852 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5853 		} else {
   5854 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5855 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
    5856 			/* RDTR is in 1024ns units vs ITR's 256ns; use ITR / 4 */
   5857 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    5858 			/* RADV MUST be set the same as RDTR */
   5859 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5860 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5861 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5862 		}
   5863 	}
   5864 }
   5865 
   5866 static int
   5867 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5868 {
   5869 	struct wm_rxsoft *rxs;
   5870 	int error, i;
   5871 
   5872 	KASSERT(WM_RX_LOCKED(rxq));
   5873 
   5874 	for (i = 0; i < WM_NRXDESC; i++) {
   5875 		rxs = &rxq->rxq_soft[i];
   5876 		if (rxs->rxs_mbuf == NULL) {
   5877 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5878 				log(LOG_ERR, "%s: unable to allocate or map "
   5879 				    "rx buffer %d, error = %d\n",
   5880 				    device_xname(sc->sc_dev), i, error);
   5881 				/*
   5882 				 * XXX Should attempt to run with fewer receive
   5883 				 * XXX buffers instead of just failing.
   5884 				 */
   5885 				wm_rxdrain(rxq);
   5886 				return ENOMEM;
   5887 			}
   5888 		} else {
   5889 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5890 				wm_init_rxdesc(rxq, i);
   5891 			/*
    5892 			 * For 82575 and newer devices, the RX descriptors
    5893 			 * must be initialized after the setting of RCTL.EN in
    5894 			 * wm_set_filter().
   5895 			 */
   5896 		}
   5897 	}
   5898 	rxq->rxq_ptr = 0;
   5899 	rxq->rxq_discard = 0;
   5900 	WM_RXCHAIN_RESET(rxq);
   5901 
   5902 	return 0;
   5903 }
   5904 
   5905 static int
   5906 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5907 {
   5908 
   5909 	KASSERT(WM_RX_LOCKED(rxq));
   5910 
   5911 	/*
   5912 	 * Set up some register offsets that are different between
   5913 	 * the i82542 and the i82543 and later chips.
   5914 	 */
   5915 	if (sc->sc_type < WM_T_82543)
   5916 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5917 	else
   5918 		rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
   5919 
   5920 	wm_init_rx_regs(sc, rxq);
   5921 	return wm_init_rx_buffer(sc, rxq);
   5922 }
   5923 
   5924 /*
   5925  * wm_init_quques:
    5926  * wm_init_txrx_queues:
    5927  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   5928 static int
   5929 wm_init_txrx_queues(struct wm_softc *sc)
   5930 {
   5931 	int i, error;
   5932 
   5933 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5934 		device_xname(sc->sc_dev), __func__));
   5935 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5936 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5937 		WM_TX_LOCK(txq);
   5938 		wm_init_tx_queue(sc, txq);
   5939 		WM_TX_UNLOCK(txq);
   5940 	}
   5941 
   5942 	error = 0;
   5943 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5944 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5945 		WM_RX_LOCK(rxq);
   5946 		error = wm_init_rx_queue(sc, rxq);
   5947 		WM_RX_UNLOCK(rxq);
   5948 		if (error)
   5949 			break;
   5950 	}
   5951 
   5952 	return error;
   5953 }
   5954 
   5955 /*
   5956  * wm_tx_offload:
   5957  *
   5958  *	Set up TCP/IP checksumming parameters for the
   5959  *	specified packet.
   5960  */
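         /*
          * Example (plain IPv4/TCP, no VLAN): offset = ETHER_HDR_LEN = 14 and
          * iphl = 20 give IPCSS = 14, IPCSO = 24 (ip_sum), IPCSE = 33, and
          * TUCSS = 34, TUCSO = 50 (th_sum), TUCSE = 0 (rest of packet).
          */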
   5961 static int
   5962 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5963     uint8_t *fieldsp)
   5964 {
   5965 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5966 	struct mbuf *m0 = txs->txs_mbuf;
   5967 	struct livengood_tcpip_ctxdesc *t;
   5968 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5969 	uint32_t ipcse;
   5970 	struct ether_header *eh;
   5971 	int offset, iphl;
   5972 	uint8_t fields;
   5973 
   5974 	/*
   5975 	 * XXX It would be nice if the mbuf pkthdr had offset
   5976 	 * fields for the protocol headers.
   5977 	 */
   5978 
   5979 	eh = mtod(m0, struct ether_header *);
   5980 	switch (htons(eh->ether_type)) {
   5981 	case ETHERTYPE_IP:
   5982 	case ETHERTYPE_IPV6:
   5983 		offset = ETHER_HDR_LEN;
   5984 		break;
   5985 
   5986 	case ETHERTYPE_VLAN:
   5987 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5988 		break;
   5989 
   5990 	default:
   5991 		/*
   5992 		 * Don't support this protocol or encapsulation.
   5993 		 */
   5994 		*fieldsp = 0;
   5995 		*cmdp = 0;
   5996 		return 0;
   5997 	}
   5998 
   5999 	if ((m0->m_pkthdr.csum_flags &
   6000 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6001 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6002 	} else {
   6003 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6004 	}
   6005 	ipcse = offset + iphl - 1;
   6006 
   6007 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6008 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6009 	seg = 0;
   6010 	fields = 0;
   6011 
   6012 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6013 		int hlen = offset + iphl;
   6014 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6015 
   6016 		if (__predict_false(m0->m_len <
   6017 				    (hlen + sizeof(struct tcphdr)))) {
   6018 			/*
   6019 			 * TCP/IP headers are not in the first mbuf; we need
   6020 			 * to do this the slow and painful way.  Let's just
   6021 			 * hope this doesn't happen very often.
   6022 			 */
   6023 			struct tcphdr th;
   6024 
   6025 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6026 
   6027 			m_copydata(m0, hlen, sizeof(th), &th);
   6028 			if (v4) {
   6029 				struct ip ip;
   6030 
   6031 				m_copydata(m0, offset, sizeof(ip), &ip);
   6032 				ip.ip_len = 0;
   6033 				m_copyback(m0,
   6034 				    offset + offsetof(struct ip, ip_len),
   6035 				    sizeof(ip.ip_len), &ip.ip_len);
   6036 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6037 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6038 			} else {
   6039 				struct ip6_hdr ip6;
   6040 
   6041 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6042 				ip6.ip6_plen = 0;
   6043 				m_copyback(m0,
   6044 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6045 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6046 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6047 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6048 			}
   6049 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6050 			    sizeof(th.th_sum), &th.th_sum);
   6051 
   6052 			hlen += th.th_off << 2;
   6053 		} else {
   6054 			/*
   6055 			 * TCP/IP headers are in the first mbuf; we can do
   6056 			 * this the easy way.
   6057 			 */
   6058 			struct tcphdr *th;
   6059 
   6060 			if (v4) {
   6061 				struct ip *ip =
   6062 				    (void *)(mtod(m0, char *) + offset);
   6063 				th = (void *)(mtod(m0, char *) + hlen);
   6064 
   6065 				ip->ip_len = 0;
   6066 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6067 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6068 			} else {
   6069 				struct ip6_hdr *ip6 =
   6070 				    (void *)(mtod(m0, char *) + offset);
   6071 				th = (void *)(mtod(m0, char *) + hlen);
   6072 
   6073 				ip6->ip6_plen = 0;
   6074 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6075 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6076 			}
   6077 			hlen += th->th_off << 2;
   6078 		}
   6079 
   6080 		if (v4) {
   6081 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6082 			cmdlen |= WTX_TCPIP_CMD_IP;
   6083 		} else {
   6084 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6085 			ipcse = 0;
   6086 		}
   6087 		cmd |= WTX_TCPIP_CMD_TSE;
   6088 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6089 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6090 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6091 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6092 	}
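         	/*
         	 * TSO example: for IPv4 with 14-byte Ethernet, 20-byte IP and
         	 * 20-byte TCP headers, hlen = 54, so the payload length placed
         	 * in cmdlen is m0->m_pkthdr.len - 54 and the hardware emits
         	 * m0->m_pkthdr.segsz-sized (e.g. 1460-byte) segments.
         	 */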
   6093 
   6094 	/*
   6095 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6096 	 * offload feature, if we load the context descriptor, we
   6097 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6098 	 */
   6099 
   6100 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6101 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6102 	    WTX_TCPIP_IPCSE(ipcse);
   6103 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6104 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   6105 		fields |= WTX_IXSM;
   6106 	}
   6107 
   6108 	offset += iphl;
   6109 
   6110 	if (m0->m_pkthdr.csum_flags &
   6111 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6112 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6113 		fields |= WTX_TXSM;
   6114 		tucs = WTX_TCPIP_TUCSS(offset) |
   6115 		    WTX_TCPIP_TUCSO(offset +
   6116 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6117 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6118 	} else if ((m0->m_pkthdr.csum_flags &
   6119 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6120 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6121 		fields |= WTX_TXSM;
   6122 		tucs = WTX_TCPIP_TUCSS(offset) |
   6123 		    WTX_TCPIP_TUCSO(offset +
   6124 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6125 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6126 	} else {
   6127 		/* Just initialize it to a valid TCP context. */
   6128 		tucs = WTX_TCPIP_TUCSS(offset) |
   6129 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6130 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6131 	}
   6132 
   6133 	/* Fill in the context descriptor. */
   6134 	t = (struct livengood_tcpip_ctxdesc *)
   6135 	    &txq->txq_descs[txq->txq_next];
   6136 	t->tcpip_ipcs = htole32(ipcs);
   6137 	t->tcpip_tucs = htole32(tucs);
   6138 	t->tcpip_cmdlen = htole32(cmdlen);
   6139 	t->tcpip_seg = htole32(seg);
   6140 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6141 
   6142 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6143 	txs->txs_ndesc++;
   6144 
   6145 	*cmdp = cmd;
   6146 	*fieldsp = fields;
   6147 
   6148 	return 0;
   6149 }
   6150 
   6151 /*
   6152  * wm_start:		[ifnet interface function]
   6153  *
   6154  *	Start packet transmission on the interface.
   6155  */
   6156 static void
   6157 wm_start(struct ifnet *ifp)
   6158 {
   6159 	struct wm_softc *sc = ifp->if_softc;
   6160 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6161 
   6162 	WM_TX_LOCK(txq);
   6163 	if (!sc->sc_stopping)
   6164 		wm_start_locked(ifp);
   6165 	WM_TX_UNLOCK(txq);
   6166 }
   6167 
   6168 static void
   6169 wm_start_locked(struct ifnet *ifp)
   6170 {
   6171 	struct wm_softc *sc = ifp->if_softc;
   6172 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6173 	struct mbuf *m0;
   6174 	struct m_tag *mtag;
   6175 	struct wm_txsoft *txs;
   6176 	bus_dmamap_t dmamap;
   6177 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6178 	bus_addr_t curaddr;
   6179 	bus_size_t seglen, curlen;
   6180 	uint32_t cksumcmd;
   6181 	uint8_t cksumfields;
   6182 
   6183 	KASSERT(WM_TX_LOCKED(txq));
   6184 
   6185 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6186 		return;
   6187 
   6188 	/* Remember the previous number of free descriptors. */
   6189 	ofree = txq->txq_free;
   6190 
   6191 	/*
   6192 	 * Loop through the send queue, setting up transmit descriptors
   6193 	 * until we drain the queue, or use up all available transmit
   6194 	 * descriptors.
   6195 	 */
   6196 	for (;;) {
   6197 		m0 = NULL;
   6198 
   6199 		/* Get a work queue entry. */
   6200 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6201 			wm_txeof(sc);
   6202 			if (txq->txq_sfree == 0) {
   6203 				DPRINTF(WM_DEBUG_TX,
   6204 				    ("%s: TX: no free job descriptors\n",
   6205 					device_xname(sc->sc_dev)));
   6206 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6207 				break;
   6208 			}
   6209 		}
   6210 
   6211 		/* Grab a packet off the queue. */
   6212 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6213 		if (m0 == NULL)
   6214 			break;
   6215 
   6216 		DPRINTF(WM_DEBUG_TX,
   6217 		    ("%s: TX: have packet to transmit: %p\n",
   6218 		    device_xname(sc->sc_dev), m0));
   6219 
   6220 		txs = &txq->txq_soft[txq->txq_snext];
   6221 		dmamap = txs->txs_dmamap;
   6222 
   6223 		use_tso = (m0->m_pkthdr.csum_flags &
   6224 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6225 
   6226 		/*
   6227 		 * So says the Linux driver:
   6228 		 * The controller does a simple calculation to make sure
   6229 		 * there is enough room in the FIFO before initiating the
   6230 		 * DMA for each buffer.  The calc is:
   6231 		 *	4 = ceil(buffer len / MSS)
   6232 		 * To make sure we don't overrun the FIFO, adjust the max
   6233 		 * buffer len if the MSS drops.
   6234 		 */
   6235 		dmamap->dm_maxsegsz =
   6236 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6237 		    ? m0->m_pkthdr.segsz << 2
   6238 		    : WTX_MAX_LEN;
   6239 
   6240 		/*
   6241 		 * Load the DMA map.  If this fails, the packet either
   6242 		 * didn't fit in the allotted number of segments, or we
   6243 		 * were short on resources.  For the too-many-segments
   6244 		 * case, we simply report an error and drop the packet,
   6245 		 * since we can't sanely copy a jumbo packet to a single
   6246 		 * buffer.
   6247 		 */
   6248 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6249 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6250 		if (error) {
   6251 			if (error == EFBIG) {
   6252 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6253 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6254 				    "DMA segments, dropping...\n",
   6255 				    device_xname(sc->sc_dev));
   6256 				wm_dump_mbuf_chain(sc, m0);
   6257 				m_freem(m0);
   6258 				continue;
   6259 			}
   6260 			/*  Short on resources, just stop for now. */
   6261 			DPRINTF(WM_DEBUG_TX,
   6262 			    ("%s: TX: dmamap load failed: %d\n",
   6263 			    device_xname(sc->sc_dev), error));
   6264 			break;
   6265 		}
   6266 
   6267 		segs_needed = dmamap->dm_nsegs;
   6268 		if (use_tso) {
   6269 			/* For sentinel descriptor; see below. */
   6270 			segs_needed++;
   6271 		}
   6272 
   6273 		/*
   6274 		 * Ensure we have enough descriptors free to describe
   6275 		 * the packet.  Note, we always reserve one descriptor
   6276 		 * at the end of the ring due to the semantics of the
   6277 		 * TDT register, plus one more in the event we need
   6278 		 * to load offload context.
   6279 		 */
   6280 		if (segs_needed > txq->txq_free - 2) {
   6281 			/*
   6282 			 * Not enough free descriptors to transmit this
   6283 			 * packet.  We haven't committed anything yet,
   6284 			 * so just unload the DMA map, put the packet
   6285 			 * pack on the queue, and punt.  Notify the upper
    6286 			 * back on the queue, and punt.  Notify the upper
   6287 			 */
   6288 			DPRINTF(WM_DEBUG_TX,
   6289 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6290 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6291 			    segs_needed, txq->txq_free - 1));
   6292 			ifp->if_flags |= IFF_OACTIVE;
   6293 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6294 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6295 			break;
   6296 		}
   6297 
   6298 		/*
   6299 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6300 		 * once we know we can transmit the packet, since we
   6301 		 * do some internal FIFO space accounting here.
   6302 		 */
   6303 		if (sc->sc_type == WM_T_82547 &&
   6304 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6305 			DPRINTF(WM_DEBUG_TX,
   6306 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6307 			    device_xname(sc->sc_dev)));
   6308 			ifp->if_flags |= IFF_OACTIVE;
   6309 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6310 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   6311 			break;
   6312 		}
   6313 
   6314 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6315 
   6316 		DPRINTF(WM_DEBUG_TX,
   6317 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6318 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6319 
   6320 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6321 
   6322 		/*
   6323 		 * Store a pointer to the packet so that we can free it
   6324 		 * later.
   6325 		 *
   6326 		 * Initially, we consider the number of descriptors the
   6327 		 * packet uses the number of DMA segments.  This may be
    6328 		 * packet uses to be the number of DMA segments.  This may be
   6329 		 * is used to set the checksum context).
   6330 		 */
   6331 		txs->txs_mbuf = m0;
   6332 		txs->txs_firstdesc = txq->txq_next;
   6333 		txs->txs_ndesc = segs_needed;
   6334 
   6335 		/* Set up offload parameters for this packet. */
   6336 		if (m0->m_pkthdr.csum_flags &
   6337 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6338 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6339 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6340 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6341 					  &cksumfields) != 0) {
   6342 				/* Error message already displayed. */
   6343 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6344 				continue;
   6345 			}
   6346 		} else {
   6347 			cksumcmd = 0;
   6348 			cksumfields = 0;
   6349 		}
   6350 
   6351 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
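		/*
		 * Per the usual e1000 command bits: WTX_CMD_IFCS has the
		 * chip append the FCS, and WTX_CMD_IDE enables the transmit
		 * interrupt delay so completions are batched rather than
		 * signalled per packet.
		 */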
   6352 
   6353 		/* Sync the DMA map. */
   6354 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6355 		    BUS_DMASYNC_PREWRITE);
   6356 
   6357 		/* Initialize the transmit descriptor. */
   6358 		for (nexttx = txq->txq_next, seg = 0;
   6359 		     seg < dmamap->dm_nsegs; seg++) {
   6360 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6361 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6362 			     seglen != 0;
   6363 			     curaddr += curlen, seglen -= curlen,
   6364 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6365 				curlen = seglen;
   6366 
   6367 				/*
   6368 				 * So says the Linux driver:
   6369 				 * Work around for premature descriptor
   6370 				 * write-backs in TSO mode.  Append a
   6371 				 * 4-byte sentinel descriptor.
   6372 				 */
   6373 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6374 				    curlen > 8)
   6375 					curlen -= 4;
   6376 
   6377 				wm_set_dma_addr(
   6378 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6379 				txq->txq_descs[nexttx].wtx_cmdlen
   6380 				    = htole32(cksumcmd | curlen);
   6381 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6382 				    = 0;
   6383 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6384 				    = cksumfields;
   6385 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6386 				lasttx = nexttx;
   6387 
   6388 				DPRINTF(WM_DEBUG_TX,
   6389 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6390 				     "len %#04zx\n",
   6391 				    device_xname(sc->sc_dev), nexttx,
   6392 				    (uint64_t)curaddr, curlen));
   6393 			}
   6394 		}
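		/*
		 * Note on the sentinel above: after trimming 4 bytes from
		 * curlen, the loop update leaves seglen at 4, so one more
		 * 4-byte descriptor is emitted.  That is the extra
		 * descriptor accounted for by the segs_needed++ for TSO
		 * earlier in this loop.
		 */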
   6395 
   6396 		KASSERT(lasttx != -1);
   6397 
   6398 		/*
   6399 		 * Set up the command byte on the last descriptor of
   6400 		 * the packet.  If we're in the interrupt delay window,
   6401 		 * delay the interrupt.
   6402 		 */
   6403 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6404 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6405 
   6406 		/*
   6407 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6408 		 * up the descriptor to encapsulate the packet for us.
   6409 		 *
   6410 		 * This is only valid on the last descriptor of the packet.
   6411 		 */
   6412 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6413 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6414 			    htole32(WTX_CMD_VLE);
   6415 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6416 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6417 		}
   6418 
   6419 		txs->txs_lastdesc = lasttx;
   6420 
   6421 		DPRINTF(WM_DEBUG_TX,
   6422 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6423 		    device_xname(sc->sc_dev),
   6424 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6425 
   6426 		/* Sync the descriptors we're using. */
   6427 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6428 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6429 
   6430 		/* Give the packet to the chip. */
   6431 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6432 
   6433 		DPRINTF(WM_DEBUG_TX,
   6434 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6435 
   6436 		DPRINTF(WM_DEBUG_TX,
   6437 		    ("%s: TX: finished transmitting packet, job %d\n",
   6438 		    device_xname(sc->sc_dev), txq->txq_snext));
   6439 
   6440 		/* Advance the tx pointer. */
   6441 		txq->txq_free -= txs->txs_ndesc;
   6442 		txq->txq_next = nexttx;
   6443 
   6444 		txq->txq_sfree--;
   6445 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6446 
   6447 		/* Pass the packet to any BPF listeners. */
   6448 		bpf_mtap(ifp, m0);
   6449 	}
   6450 
   6451 	if (m0 != NULL) {
   6452 		ifp->if_flags |= IFF_OACTIVE;
   6453 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6454 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6455 			__func__));
   6456 		m_freem(m0);
   6457 	}
   6458 
   6459 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6460 		/* No more slots; notify upper layer. */
   6461 		ifp->if_flags |= IFF_OACTIVE;
   6462 	}
   6463 
   6464 	if (txq->txq_free != ofree) {
   6465 		/* Set a watchdog timer in case the chip flakes out. */
   6466 		ifp->if_timer = 5;
   6467 	}
   6468 }
   6469 
   6470 /*
   6471  * wm_nq_tx_offload:
   6472  *
   6473  *	Set up TCP/IP checksumming parameters for the
   6474  *	specified packet, for NEWQUEUE devices
   6475  */
   6476 static int
   6477 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   6478     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6479 {
   6480 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6481 	struct mbuf *m0 = txs->txs_mbuf;
   6482 	struct m_tag *mtag;
   6483 	uint32_t vl_len, mssidx, cmdc;
   6484 	struct ether_header *eh;
   6485 	int offset, iphl;
   6486 
   6487 	/*
   6488 	 * XXX It would be nice if the mbuf pkthdr had offset
   6489 	 * fields for the protocol headers.
   6490 	 */
   6491 	*cmdlenp = 0;
   6492 	*fieldsp = 0;
   6493 
   6494 	eh = mtod(m0, struct ether_header *);
   6495 	switch (htons(eh->ether_type)) {
   6496 	case ETHERTYPE_IP:
   6497 	case ETHERTYPE_IPV6:
   6498 		offset = ETHER_HDR_LEN;
   6499 		break;
   6500 
   6501 	case ETHERTYPE_VLAN:
   6502 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6503 		break;
   6504 
   6505 	default:
   6506 		/* Don't support this protocol or encapsulation. */
   6507 		*do_csum = false;
   6508 		return 0;
   6509 	}
   6510 	*do_csum = true;
   6511 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6512 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6513 
   6514 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6515 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6516 
   6517 	if ((m0->m_pkthdr.csum_flags &
   6518 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6519 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6520 	} else {
   6521 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6522 	}
   6523 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6524 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6525 
   6526 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6527 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6528 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6529 		*cmdlenp |= NQTX_CMD_VLE;
   6530 	}
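	/*
	 * vl_len now packs the MAC header length, the IP header length
	 * and (optionally) the VLAN tag.  For example, an untagged
	 * IPv4/TCP packet with no IP options yields MACLEN = 14 and
	 * IPLEN = 20 here.
	 */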
   6531 
   6532 	mssidx = 0;
   6533 
   6534 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6535 		int hlen = offset + iphl;
   6536 		int tcp_hlen;
   6537 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6538 
   6539 		if (__predict_false(m0->m_len <
   6540 				    (hlen + sizeof(struct tcphdr)))) {
   6541 			/*
   6542 			 * TCP/IP headers are not in the first mbuf; we need
   6543 			 * to do this the slow and painful way.  Let's just
   6544 			 * hope this doesn't happen very often.
   6545 			 */
   6546 			struct tcphdr th;
   6547 
   6548 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6549 
   6550 			m_copydata(m0, hlen, sizeof(th), &th);
   6551 			if (v4) {
   6552 				struct ip ip;
   6553 
   6554 				m_copydata(m0, offset, sizeof(ip), &ip);
   6555 				ip.ip_len = 0;
   6556 				m_copyback(m0,
   6557 				    offset + offsetof(struct ip, ip_len),
   6558 				    sizeof(ip.ip_len), &ip.ip_len);
   6559 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6560 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6561 			} else {
   6562 				struct ip6_hdr ip6;
   6563 
   6564 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6565 				ip6.ip6_plen = 0;
   6566 				m_copyback(m0,
   6567 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6568 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6569 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6570 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6571 			}
   6572 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6573 			    sizeof(th.th_sum), &th.th_sum);
   6574 
   6575 			tcp_hlen = th.th_off << 2;
   6576 		} else {
   6577 			/*
   6578 			 * TCP/IP headers are in the first mbuf; we can do
   6579 			 * this the easy way.
   6580 			 */
   6581 			struct tcphdr *th;
   6582 
   6583 			if (v4) {
   6584 				struct ip *ip =
   6585 				    (void *)(mtod(m0, char *) + offset);
   6586 				th = (void *)(mtod(m0, char *) + hlen);
   6587 
   6588 				ip->ip_len = 0;
   6589 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6590 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6591 			} else {
   6592 				struct ip6_hdr *ip6 =
   6593 				    (void *)(mtod(m0, char *) + offset);
   6594 				th = (void *)(mtod(m0, char *) + hlen);
   6595 
   6596 				ip6->ip6_plen = 0;
   6597 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6598 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6599 			}
   6600 			tcp_hlen = th->th_off << 2;
   6601 		}
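		/*
		 * Both paths above zero the IP total-length field and seed
		 * th_sum with the pseudo-header checksum (addresses and
		 * protocol only).  As with other e1000-family drivers, the
		 * hardware is expected to fill in the per-segment lengths
		 * and finish the checksums while segmenting.
		 */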
   6602 		hlen += tcp_hlen;
   6603 		*cmdlenp |= NQTX_CMD_TSE;
   6604 
   6605 		if (v4) {
   6606 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6607 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6608 		} else {
   6609 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6610 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6611 		}
   6612 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6613 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6614 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6615 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6616 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6617 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6618 	} else {
   6619 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6620 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6621 	}
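	/*
	 * PAYLEN is therefore the TCP payload length (packet length minus
	 * all headers) in the TSO case, but the whole packet length for
	 * plain checksum offload.
	 */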
   6622 
   6623 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6624 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6625 		cmdc |= NQTXC_CMD_IP4;
   6626 	}
   6627 
   6628 	if (m0->m_pkthdr.csum_flags &
   6629 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6630 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6631 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6632 			cmdc |= NQTXC_CMD_TCP;
   6633 		} else {
   6634 			cmdc |= NQTXC_CMD_UDP;
   6635 		}
   6636 		cmdc |= NQTXC_CMD_IP4;
   6637 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6638 	}
   6639 	if (m0->m_pkthdr.csum_flags &
   6640 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6641 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6642 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6643 			cmdc |= NQTXC_CMD_TCP;
   6644 		} else {
   6645 			cmdc |= NQTXC_CMD_UDP;
   6646 		}
   6647 		cmdc |= NQTXC_CMD_IP6;
   6648 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6649 	}
   6650 
   6651 	/* Fill in the context descriptor. */
   6652 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6653 	    htole32(vl_len);
   6654 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6655 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6656 	    htole32(cmdc);
   6657 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6658 	    htole32(mssidx);
   6659 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6660 	DPRINTF(WM_DEBUG_TX,
   6661 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6662 	    txq->txq_next, 0, vl_len));
   6663 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6664 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6665 	txs->txs_ndesc++;
   6666 	return 0;
   6667 }
   6668 
   6669 /*
   6670  * wm_nq_start:		[ifnet interface function]
   6671  *
   6672  *	Start packet transmission on the interface for NEWQUEUE devices
   6673  */
   6674 static void
   6675 wm_nq_start(struct ifnet *ifp)
   6676 {
   6677 	struct wm_softc *sc = ifp->if_softc;
   6678 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6679 
   6680 	WM_TX_LOCK(txq);
   6681 	if (!sc->sc_stopping)
   6682 		wm_nq_start_locked(ifp);
   6683 	WM_TX_UNLOCK(txq);
   6684 }
   6685 
   6686 static void
   6687 wm_nq_start_locked(struct ifnet *ifp)
   6688 {
   6689 	struct wm_softc *sc = ifp->if_softc;
   6690 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6691 	struct mbuf *m0;
   6692 	struct m_tag *mtag;
   6693 	struct wm_txsoft *txs;
   6694 	bus_dmamap_t dmamap;
   6695 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6696 	bool do_csum, sent;
   6697 
   6698 	KASSERT(WM_TX_LOCKED(txq));
   6699 
   6700 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6701 		return;
   6702 
   6703 	sent = false;
   6704 
   6705 	/*
   6706 	 * Loop through the send queue, setting up transmit descriptors
   6707 	 * until we drain the queue, or use up all available transmit
   6708 	 * descriptors.
   6709 	 */
   6710 	for (;;) {
   6711 		m0 = NULL;
   6712 
   6713 		/* Get a work queue entry. */
   6714 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6715 			wm_txeof(sc);
   6716 			if (txq->txq_sfree == 0) {
   6717 				DPRINTF(WM_DEBUG_TX,
   6718 				    ("%s: TX: no free job descriptors\n",
   6719 					device_xname(sc->sc_dev)));
   6720 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6721 				break;
   6722 			}
   6723 		}
   6724 
   6725 		/* Grab a packet off the queue. */
   6726 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6727 		if (m0 == NULL)
   6728 			break;
   6729 
   6730 		DPRINTF(WM_DEBUG_TX,
   6731 		    ("%s: TX: have packet to transmit: %p\n",
   6732 		    device_xname(sc->sc_dev), m0));
   6733 
   6734 		txs = &txq->txq_soft[txq->txq_snext];
   6735 		dmamap = txs->txs_dmamap;
   6736 
   6737 		/*
   6738 		 * Load the DMA map.  If this fails, the packet either
   6739 		 * didn't fit in the allotted number of segments, or we
   6740 		 * were short on resources.  For the too-many-segments
   6741 		 * case, we simply report an error and drop the packet,
   6742 		 * since we can't sanely copy a jumbo packet to a single
   6743 		 * buffer.
   6744 		 */
   6745 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6746 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6747 		if (error) {
   6748 			if (error == EFBIG) {
   6749 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6750 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6751 				    "DMA segments, dropping...\n",
   6752 				    device_xname(sc->sc_dev));
   6753 				wm_dump_mbuf_chain(sc, m0);
   6754 				m_freem(m0);
   6755 				continue;
   6756 			}
   6757 			/* Short on resources, just stop for now. */
   6758 			DPRINTF(WM_DEBUG_TX,
   6759 			    ("%s: TX: dmamap load failed: %d\n",
   6760 			    device_xname(sc->sc_dev), error));
   6761 			break;
   6762 		}
   6763 
   6764 		segs_needed = dmamap->dm_nsegs;
   6765 
   6766 		/*
   6767 		 * Ensure we have enough descriptors free to describe
   6768 		 * the packet.  Note, we always reserve one descriptor
   6769 		 * at the end of the ring due to the semantics of the
   6770 		 * TDT register, plus one more in the event we need
   6771 		 * to load offload context.
   6772 		 */
   6773 		if (segs_needed > txq->txq_free - 2) {
   6774 			/*
   6775 			 * Not enough free descriptors to transmit this
   6776 			 * packet.  We haven't committed anything yet,
   6777 			 * so just unload the DMA map, put the packet
    6778 			 * back on the queue, and punt.  Notify the upper
   6779 			 * layer that there are no more slots left.
   6780 			 */
   6781 			DPRINTF(WM_DEBUG_TX,
   6782 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6783 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6784 			    segs_needed, txq->txq_free - 1));
   6785 			ifp->if_flags |= IFF_OACTIVE;
   6786 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6787 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6788 			break;
   6789 		}
   6790 
   6791 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6792 
   6793 		DPRINTF(WM_DEBUG_TX,
   6794 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6795 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6796 
   6797 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6798 
   6799 		/*
   6800 		 * Store a pointer to the packet so that we can free it
   6801 		 * later.
   6802 		 *
   6803 		 * Initially, we consider the number of descriptors the
    6804 		 * packet uses to be the number of DMA segments.  This may be
   6805 		 * incremented by 1 if we do checksum offload (a descriptor
   6806 		 * is used to set the checksum context).
   6807 		 */
   6808 		txs->txs_mbuf = m0;
   6809 		txs->txs_firstdesc = txq->txq_next;
   6810 		txs->txs_ndesc = segs_needed;
   6811 
   6812 		/* Set up offload parameters for this packet. */
   6813 		uint32_t cmdlen, fields, dcmdlen;
   6814 		if (m0->m_pkthdr.csum_flags &
   6815 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6816 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6817 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6818 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   6819 			    &do_csum) != 0) {
   6820 				/* Error message already displayed. */
   6821 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6822 				continue;
   6823 			}
   6824 		} else {
   6825 			do_csum = false;
   6826 			cmdlen = 0;
   6827 			fields = 0;
   6828 		}
   6829 
   6830 		/* Sync the DMA map. */
   6831 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6832 		    BUS_DMASYNC_PREWRITE);
   6833 
   6834 		/* Initialize the first transmit descriptor. */
   6835 		nexttx = txq->txq_next;
   6836 		if (!do_csum) {
   6837 			/* setup a legacy descriptor */
   6838 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   6839 			    dmamap->dm_segs[0].ds_addr);
   6840 			txq->txq_descs[nexttx].wtx_cmdlen =
   6841 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6842 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6843 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6844 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6845 			    NULL) {
   6846 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6847 				    htole32(WTX_CMD_VLE);
   6848 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6849 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6850 			} else {
   6851 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6852 			}
   6853 			dcmdlen = 0;
   6854 		} else {
   6855 			/* setup an advanced data descriptor */
   6856 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6857 			    htole64(dmamap->dm_segs[0].ds_addr);
   6858 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6859 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6860 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6861 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6862 			    htole32(fields);
   6863 			DPRINTF(WM_DEBUG_TX,
   6864 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6865 			    device_xname(sc->sc_dev), nexttx,
   6866 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6867 			DPRINTF(WM_DEBUG_TX,
   6868 			    ("\t 0x%08x%08x\n", fields,
   6869 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6870 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6871 		}
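		/*
		 * dcmdlen selects the layout of the remaining descriptors:
		 * 0 keeps them in legacy format, while NQTX_DTYP_D |
		 * NQTX_CMD_DEXT marks each as an advanced data descriptor
		 * to match the context descriptor set up above.
		 */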
   6872 
   6873 		lasttx = nexttx;
   6874 		nexttx = WM_NEXTTX(txq, nexttx);
   6875 		/*
    6876 		 * Fill in the next descriptors.  The legacy and advanced
    6877 		 * formats are the same from here on.
   6878 		 */
   6879 		for (seg = 1; seg < dmamap->dm_nsegs;
   6880 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6881 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6882 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6883 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6884 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6885 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6886 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6887 			lasttx = nexttx;
   6888 
   6889 			DPRINTF(WM_DEBUG_TX,
   6890 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6891 			     "len %#04zx\n",
   6892 			    device_xname(sc->sc_dev), nexttx,
   6893 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6894 			    dmamap->dm_segs[seg].ds_len));
   6895 		}
   6896 
   6897 		KASSERT(lasttx != -1);
   6898 
   6899 		/*
   6900 		 * Set up the command byte on the last descriptor of
   6901 		 * the packet.  If we're in the interrupt delay window,
   6902 		 * delay the interrupt.
   6903 		 */
   6904 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6905 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6906 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6907 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6908 
   6909 		txs->txs_lastdesc = lasttx;
   6910 
   6911 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6912 		    device_xname(sc->sc_dev),
   6913 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6914 
   6915 		/* Sync the descriptors we're using. */
   6916 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6917 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6918 
   6919 		/* Give the packet to the chip. */
   6920 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6921 		sent = true;
   6922 
   6923 		DPRINTF(WM_DEBUG_TX,
   6924 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6925 
   6926 		DPRINTF(WM_DEBUG_TX,
   6927 		    ("%s: TX: finished transmitting packet, job %d\n",
   6928 		    device_xname(sc->sc_dev), txq->txq_snext));
   6929 
   6930 		/* Advance the tx pointer. */
   6931 		txq->txq_free -= txs->txs_ndesc;
   6932 		txq->txq_next = nexttx;
   6933 
   6934 		txq->txq_sfree--;
   6935 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6936 
   6937 		/* Pass the packet to any BPF listeners. */
   6938 		bpf_mtap(ifp, m0);
   6939 	}
   6940 
   6941 	if (m0 != NULL) {
   6942 		ifp->if_flags |= IFF_OACTIVE;
   6943 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6944 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6945 			__func__));
   6946 		m_freem(m0);
   6947 	}
   6948 
   6949 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6950 		/* No more slots; notify upper layer. */
   6951 		ifp->if_flags |= IFF_OACTIVE;
   6952 	}
   6953 
   6954 	if (sent) {
   6955 		/* Set a watchdog timer in case the chip flakes out. */
   6956 		ifp->if_timer = 5;
   6957 	}
   6958 }
   6959 
   6960 /* Interrupt */
   6961 
   6962 /*
   6963  * wm_txeof:
   6964  *
   6965  *	Helper; handle transmit interrupts.
   6966  */
   6967 static int
   6968 wm_txeof(struct wm_softc *sc)
   6969 {
   6970 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6971 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6972 	struct wm_txsoft *txs;
   6973 	bool processed = false;
   6974 	int count = 0;
   6975 	int i;
   6976 	uint8_t status;
   6977 
   6978 	if (sc->sc_stopping)
   6979 		return 0;
   6980 
   6981 	ifp->if_flags &= ~IFF_OACTIVE;
   6982 
   6983 	/*
   6984 	 * Go through the Tx list and free mbufs for those
   6985 	 * frames which have been transmitted.
   6986 	 */
   6987 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   6988 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   6989 		txs = &txq->txq_soft[i];
   6990 
   6991 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   6992 			device_xname(sc->sc_dev), i));
   6993 
   6994 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   6995 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   6996 
   6997 		status =
   6998 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   6999 		if ((status & WTX_ST_DD) == 0) {
   7000 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7001 			    BUS_DMASYNC_PREREAD);
   7002 			break;
   7003 		}
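		/*
		 * WTX_ST_DD is the descriptor-done status the hardware
		 * writes back when WTX_CMD_RS was requested.  If it isn't
		 * set yet the job is still in flight, so re-arm the
		 * PREREAD sync and stop scanning.
		 */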
   7004 
   7005 		processed = true;
   7006 		count++;
   7007 		DPRINTF(WM_DEBUG_TX,
   7008 		    ("%s: TX: job %d done: descs %d..%d\n",
   7009 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7010 		    txs->txs_lastdesc));
   7011 
   7012 		/*
   7013 		 * XXX We should probably be using the statistics
   7014 		 * XXX registers, but I don't know if they exist
   7015 		 * XXX on chips before the i82544.
   7016 		 */
   7017 
   7018 #ifdef WM_EVENT_COUNTERS
   7019 		if (status & WTX_ST_TU)
   7020 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   7021 #endif /* WM_EVENT_COUNTERS */
   7022 
   7023 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7024 			ifp->if_oerrors++;
   7025 			if (status & WTX_ST_LC)
   7026 				log(LOG_WARNING, "%s: late collision\n",
   7027 				    device_xname(sc->sc_dev));
   7028 			else if (status & WTX_ST_EC) {
   7029 				ifp->if_collisions += 16;
   7030 				log(LOG_WARNING, "%s: excessive collisions\n",
   7031 				    device_xname(sc->sc_dev));
   7032 			}
   7033 		} else
   7034 			ifp->if_opackets++;
   7035 
   7036 		txq->txq_free += txs->txs_ndesc;
   7037 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7038 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7039 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7040 		m_freem(txs->txs_mbuf);
   7041 		txs->txs_mbuf = NULL;
   7042 	}
   7043 
   7044 	/* Update the dirty transmit buffer pointer. */
   7045 	txq->txq_sdirty = i;
   7046 	DPRINTF(WM_DEBUG_TX,
   7047 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7048 
   7049 	if (count != 0)
   7050 		rnd_add_uint32(&sc->rnd_source, count);
   7051 
   7052 	/*
   7053 	 * If there are no more pending transmissions, cancel the watchdog
   7054 	 * timer.
   7055 	 */
   7056 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7057 		ifp->if_timer = 0;
   7058 
   7059 	return processed;
   7060 }
   7061 
   7062 /*
   7063  * wm_rxeof:
   7064  *
   7065  *	Helper; handle receive interrupts.
   7066  */
   7067 static void
   7068 wm_rxeof(struct wm_rxqueue *rxq)
   7069 {
   7070 	struct wm_softc *sc = rxq->rxq_sc;
   7071 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7072 	struct wm_rxsoft *rxs;
   7073 	struct mbuf *m;
   7074 	int i, len;
   7075 	int count = 0;
   7076 	uint8_t status, errors;
   7077 	uint16_t vlantag;
   7078 
   7079 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7080 		rxs = &rxq->rxq_soft[i];
   7081 
   7082 		DPRINTF(WM_DEBUG_RX,
   7083 		    ("%s: RX: checking descriptor %d\n",
   7084 		    device_xname(sc->sc_dev), i));
   7085 
   7086 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7087 
   7088 		status = rxq->rxq_descs[i].wrx_status;
   7089 		errors = rxq->rxq_descs[i].wrx_errors;
   7090 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7091 		vlantag = rxq->rxq_descs[i].wrx_special;
   7092 
   7093 		if ((status & WRX_ST_DD) == 0) {
   7094 			/* We have processed all of the receive descriptors. */
   7095 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7096 			break;
   7097 		}
   7098 
   7099 		count++;
   7100 		if (__predict_false(rxq->rxq_discard)) {
   7101 			DPRINTF(WM_DEBUG_RX,
   7102 			    ("%s: RX: discarding contents of descriptor %d\n",
   7103 			    device_xname(sc->sc_dev), i));
   7104 			wm_init_rxdesc(rxq, i);
   7105 			if (status & WRX_ST_EOP) {
   7106 				/* Reset our state. */
   7107 				DPRINTF(WM_DEBUG_RX,
   7108 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7109 				    device_xname(sc->sc_dev)));
   7110 				rxq->rxq_discard = 0;
   7111 			}
   7112 			continue;
   7113 		}
   7114 
   7115 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7116 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7117 
   7118 		m = rxs->rxs_mbuf;
   7119 
   7120 		/*
   7121 		 * Add a new receive buffer to the ring, unless of
   7122 		 * course the length is zero. Treat the latter as a
   7123 		 * failed mapping.
   7124 		 */
   7125 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7126 			/*
   7127 			 * Failed, throw away what we've done so
   7128 			 * far, and discard the rest of the packet.
   7129 			 */
   7130 			ifp->if_ierrors++;
   7131 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7132 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7133 			wm_init_rxdesc(rxq, i);
   7134 			if ((status & WRX_ST_EOP) == 0)
   7135 				rxq->rxq_discard = 1;
   7136 			if (rxq->rxq_head != NULL)
   7137 				m_freem(rxq->rxq_head);
   7138 			WM_RXCHAIN_RESET(rxq);
   7139 			DPRINTF(WM_DEBUG_RX,
   7140 			    ("%s: RX: Rx buffer allocation failed, "
   7141 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7142 			    rxq->rxq_discard ? " (discard)" : ""));
   7143 			continue;
   7144 		}
   7145 
   7146 		m->m_len = len;
   7147 		rxq->rxq_len += len;
   7148 		DPRINTF(WM_DEBUG_RX,
   7149 		    ("%s: RX: buffer at %p len %d\n",
   7150 		    device_xname(sc->sc_dev), m->m_data, len));
   7151 
   7152 		/* If this is not the end of the packet, keep looking. */
   7153 		if ((status & WRX_ST_EOP) == 0) {
   7154 			WM_RXCHAIN_LINK(rxq, m);
   7155 			DPRINTF(WM_DEBUG_RX,
   7156 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7157 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7158 			continue;
   7159 		}
   7160 
   7161 		/*
    7162 		 * Okay, we have the entire packet now.  The chip is
    7163 		 * configured to include the FCS except on I350, I354
    7164 		 * and I21[01] (not all chips can be configured to strip
    7165 		 * it), so we need to trim it here.
    7166 		 * We may need to adjust the length of the previous mbuf
    7167 		 * in the chain if the current mbuf is too short.
    7168 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7169 		 * register is always set on I350, so we don't trim there.
   7170 		 */
   7171 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7172 		    && (sc->sc_type != WM_T_I210)
   7173 		    && (sc->sc_type != WM_T_I211)) {
   7174 			if (m->m_len < ETHER_CRC_LEN) {
   7175 				rxq->rxq_tail->m_len
   7176 				    -= (ETHER_CRC_LEN - m->m_len);
   7177 				m->m_len = 0;
   7178 			} else
   7179 				m->m_len -= ETHER_CRC_LEN;
   7180 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7181 		} else
   7182 			len = rxq->rxq_len;
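		/*
		 * Worked example of the adjustment above: if the packet
		 * ends with a 2-byte mbuf, those 2 bytes and the last
		 * 2 bytes of the previous mbuf are all FCS, so the
		 * previous mbuf is shortened by 2 and the current one is
		 * emptied.
		 */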
   7183 
   7184 		WM_RXCHAIN_LINK(rxq, m);
   7185 
   7186 		*rxq->rxq_tailp = NULL;
   7187 		m = rxq->rxq_head;
   7188 
   7189 		WM_RXCHAIN_RESET(rxq);
   7190 
   7191 		DPRINTF(WM_DEBUG_RX,
   7192 		    ("%s: RX: have entire packet, len -> %d\n",
   7193 		    device_xname(sc->sc_dev), len));
   7194 
   7195 		/* If an error occurred, update stats and drop the packet. */
   7196 		if (errors &
   7197 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7198 			if (errors & WRX_ER_SE)
   7199 				log(LOG_WARNING, "%s: symbol error\n",
   7200 				    device_xname(sc->sc_dev));
   7201 			else if (errors & WRX_ER_SEQ)
   7202 				log(LOG_WARNING, "%s: receive sequence error\n",
   7203 				    device_xname(sc->sc_dev));
   7204 			else if (errors & WRX_ER_CE)
   7205 				log(LOG_WARNING, "%s: CRC error\n",
   7206 				    device_xname(sc->sc_dev));
   7207 			m_freem(m);
   7208 			continue;
   7209 		}
   7210 
   7211 		/* No errors.  Receive the packet. */
   7212 		m->m_pkthdr.rcvif = ifp;
   7213 		m->m_pkthdr.len = len;
   7214 
   7215 		/*
   7216 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7217 		 * for us.  Associate the tag with the packet.
   7218 		 */
   7219 		/* XXXX should check for i350 and i354 */
   7220 		if ((status & WRX_ST_VP) != 0) {
   7221 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7222 		}
   7223 
   7224 		/* Set up checksum info for this packet. */
   7225 		if ((status & WRX_ST_IXSM) == 0) {
   7226 			if (status & WRX_ST_IPCS) {
   7227 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   7228 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7229 				if (errors & WRX_ER_IPE)
   7230 					m->m_pkthdr.csum_flags |=
   7231 					    M_CSUM_IPv4_BAD;
   7232 			}
   7233 			if (status & WRX_ST_TCPCS) {
   7234 				/*
   7235 				 * Note: we don't know if this was TCP or UDP,
   7236 				 * so we just set both bits, and expect the
   7237 				 * upper layers to deal.
   7238 				 */
   7239 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   7240 				m->m_pkthdr.csum_flags |=
   7241 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7242 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7243 				if (errors & WRX_ER_TCPE)
   7244 					m->m_pkthdr.csum_flags |=
   7245 					    M_CSUM_TCP_UDP_BAD;
   7246 			}
   7247 		}
   7248 
   7249 		ifp->if_ipackets++;
   7250 
   7251 		WM_RX_UNLOCK(rxq);
   7252 
   7253 		/* Pass this up to any BPF listeners. */
   7254 		bpf_mtap(ifp, m);
   7255 
   7256 		/* Pass it on. */
   7257 		if_percpuq_enqueue(sc->sc_ipq, m);
   7258 
   7259 		WM_RX_LOCK(rxq);
   7260 
   7261 		if (sc->sc_stopping)
   7262 			break;
   7263 	}
   7264 
   7265 	/* Update the receive pointer. */
   7266 	rxq->rxq_ptr = i;
   7267 	if (count != 0)
   7268 		rnd_add_uint32(&sc->rnd_source, count);
   7269 
   7270 	DPRINTF(WM_DEBUG_RX,
   7271 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7272 }
   7273 
   7274 /*
   7275  * wm_linkintr_gmii:
   7276  *
   7277  *	Helper; handle link interrupts for GMII.
   7278  */
   7279 static void
   7280 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7281 {
   7282 
   7283 	KASSERT(WM_CORE_LOCKED(sc));
   7284 
   7285 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7286 		__func__));
   7287 
   7288 	if (icr & ICR_LSC) {
   7289 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7290 
   7291 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7292 			wm_gig_downshift_workaround_ich8lan(sc);
   7293 
   7294 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7295 			device_xname(sc->sc_dev)));
   7296 		mii_pollstat(&sc->sc_mii);
   7297 		if (sc->sc_type == WM_T_82543) {
   7298 			int miistatus, active;
   7299 
   7300 			/*
   7301 			 * With 82543, we need to force speed and
   7302 			 * duplex on the MAC equal to what the PHY
   7303 			 * speed and duplex configuration is.
   7304 			 */
   7305 			miistatus = sc->sc_mii.mii_media_status;
   7306 
   7307 			if (miistatus & IFM_ACTIVE) {
   7308 				active = sc->sc_mii.mii_media_active;
   7309 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7310 				switch (IFM_SUBTYPE(active)) {
   7311 				case IFM_10_T:
   7312 					sc->sc_ctrl |= CTRL_SPEED_10;
   7313 					break;
   7314 				case IFM_100_TX:
   7315 					sc->sc_ctrl |= CTRL_SPEED_100;
   7316 					break;
   7317 				case IFM_1000_T:
   7318 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7319 					break;
   7320 				default:
   7321 					/*
   7322 					 * fiber?
    7323 					 * Should not enter here.
   7324 					 */
   7325 					printf("unknown media (%x)\n", active);
   7326 					break;
   7327 				}
   7328 				if (active & IFM_FDX)
   7329 					sc->sc_ctrl |= CTRL_FD;
   7330 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7331 			}
   7332 		} else if ((sc->sc_type == WM_T_ICH8)
   7333 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7334 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7335 		} else if (sc->sc_type == WM_T_PCH) {
   7336 			wm_k1_gig_workaround_hv(sc,
   7337 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7338 		}
   7339 
   7340 		if ((sc->sc_phytype == WMPHY_82578)
   7341 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7342 			== IFM_1000_T)) {
   7343 
   7344 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7345 				delay(200*1000); /* XXX too big */
   7346 
   7347 				/* Link stall fix for link up */
   7348 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7349 				    HV_MUX_DATA_CTRL,
   7350 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7351 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7352 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7353 				    HV_MUX_DATA_CTRL,
   7354 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7355 			}
   7356 		}
   7357 	} else if (icr & ICR_RXSEQ) {
   7358 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7359 			device_xname(sc->sc_dev)));
   7360 	}
   7361 }
   7362 
   7363 /*
   7364  * wm_linkintr_tbi:
   7365  *
   7366  *	Helper; handle link interrupts for TBI mode.
   7367  */
   7368 static void
   7369 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7370 {
   7371 	uint32_t status;
   7372 
   7373 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7374 		__func__));
   7375 
   7376 	status = CSR_READ(sc, WMREG_STATUS);
   7377 	if (icr & ICR_LSC) {
   7378 		if (status & STATUS_LU) {
   7379 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7380 			    device_xname(sc->sc_dev),
   7381 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7382 			/*
   7383 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7384 			 * so we should update sc->sc_ctrl
   7385 			 */
   7386 
   7387 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7388 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7389 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7390 			if (status & STATUS_FD)
   7391 				sc->sc_tctl |=
   7392 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7393 			else
   7394 				sc->sc_tctl |=
   7395 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7396 			if (sc->sc_ctrl & CTRL_TFCE)
   7397 				sc->sc_fcrtl |= FCRTL_XONE;
   7398 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7399 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7400 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7401 				      sc->sc_fcrtl);
   7402 			sc->sc_tbi_linkup = 1;
   7403 		} else {
   7404 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7405 			    device_xname(sc->sc_dev)));
   7406 			sc->sc_tbi_linkup = 0;
   7407 		}
   7408 		/* Update LED */
   7409 		wm_tbi_serdes_set_linkled(sc);
   7410 	} else if (icr & ICR_RXSEQ) {
   7411 		DPRINTF(WM_DEBUG_LINK,
   7412 		    ("%s: LINK: Receive sequence error\n",
   7413 		    device_xname(sc->sc_dev)));
   7414 	}
   7415 }
   7416 
   7417 /*
   7418  * wm_linkintr_serdes:
   7419  *
    7420  *	Helper; handle link interrupts for SERDES mode.
   7421  */
   7422 static void
   7423 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7424 {
   7425 	struct mii_data *mii = &sc->sc_mii;
   7426 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7427 	uint32_t pcs_adv, pcs_lpab, reg;
   7428 
   7429 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7430 		__func__));
   7431 
   7432 	if (icr & ICR_LSC) {
   7433 		/* Check PCS */
   7434 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7435 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7436 			mii->mii_media_status |= IFM_ACTIVE;
   7437 			sc->sc_tbi_linkup = 1;
   7438 		} else {
   7439 			mii->mii_media_status |= IFM_NONE;
   7440 			sc->sc_tbi_linkup = 0;
   7441 			wm_tbi_serdes_set_linkled(sc);
   7442 			return;
   7443 		}
   7444 		mii->mii_media_active |= IFM_1000_SX;
   7445 		if ((reg & PCS_LSTS_FDX) != 0)
   7446 			mii->mii_media_active |= IFM_FDX;
   7447 		else
   7448 			mii->mii_media_active |= IFM_HDX;
   7449 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7450 			/* Check flow */
   7451 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7452 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7453 				DPRINTF(WM_DEBUG_LINK,
   7454 				    ("XXX LINKOK but not ACOMP\n"));
   7455 				return;
   7456 			}
   7457 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7458 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7459 			DPRINTF(WM_DEBUG_LINK,
   7460 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7461 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7462 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7463 				mii->mii_media_active |= IFM_FLOW
   7464 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7465 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7466 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7467 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7468 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7469 				mii->mii_media_active |= IFM_FLOW
   7470 				    | IFM_ETH_TXPAUSE;
   7471 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7472 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7473 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7474 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7475 				mii->mii_media_active |= IFM_FLOW
   7476 				    | IFM_ETH_RXPAUSE;
   7477 		}
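		/*
		 * The pause-bit comparisons above follow the standard
		 * 802.3 Annex 28B resolution: symmetric pause on both
		 * sides enables flow control in both directions, while
		 * mixed symmetric/asymmetric advertisements enable it in
		 * one direction only.
		 */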
   7478 		/* Update LED */
   7479 		wm_tbi_serdes_set_linkled(sc);
   7480 	} else {
   7481 		DPRINTF(WM_DEBUG_LINK,
   7482 		    ("%s: LINK: Receive sequence error\n",
   7483 		    device_xname(sc->sc_dev)));
   7484 	}
   7485 }
   7486 
   7487 /*
   7488  * wm_linkintr:
   7489  *
   7490  *	Helper; handle link interrupts.
   7491  */
   7492 static void
   7493 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7494 {
   7495 
   7496 	KASSERT(WM_CORE_LOCKED(sc));
   7497 
   7498 	if (sc->sc_flags & WM_F_HAS_MII)
   7499 		wm_linkintr_gmii(sc, icr);
   7500 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7501 	    && (sc->sc_type >= WM_T_82575))
   7502 		wm_linkintr_serdes(sc, icr);
   7503 	else
   7504 		wm_linkintr_tbi(sc, icr);
   7505 }
   7506 
   7507 /*
   7508  * wm_intr_legacy:
   7509  *
   7510  *	Interrupt service routine for INTx and MSI.
   7511  */
   7512 static int
   7513 wm_intr_legacy(void *arg)
   7514 {
   7515 	struct wm_softc *sc = arg;
   7516 	struct wm_txqueue *txq = &sc->sc_txq[0];
   7517 	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
   7518 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7519 	uint32_t icr, rndval = 0;
   7520 	int handled = 0;
   7521 
   7522 	DPRINTF(WM_DEBUG_TX,
   7523 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7524 	while (1 /* CONSTCOND */) {
   7525 		icr = CSR_READ(sc, WMREG_ICR);
   7526 		if ((icr & sc->sc_icr) == 0)
   7527 			break;
   7528 		if (rndval == 0)
   7529 			rndval = icr;
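		/*
		 * ICR is read-to-clear on these devices, so the read above
		 * both fetches and acknowledges the pending causes; we
		 * loop until no cause we care about remains asserted.
		 */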
   7530 
   7531 		WM_RX_LOCK(rxq);
   7532 
   7533 		if (sc->sc_stopping) {
   7534 			WM_RX_UNLOCK(rxq);
   7535 			break;
   7536 		}
   7537 
   7538 		handled = 1;
   7539 
   7540 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7541 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7542 			DPRINTF(WM_DEBUG_RX,
   7543 			    ("%s: RX: got Rx intr 0x%08x\n",
   7544 			    device_xname(sc->sc_dev),
   7545 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7546 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7547 		}
   7548 #endif
   7549 		wm_rxeof(rxq);
   7550 
   7551 		WM_RX_UNLOCK(rxq);
   7552 		WM_TX_LOCK(txq);
   7553 
   7554 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7555 		if (icr & ICR_TXDW) {
   7556 			DPRINTF(WM_DEBUG_TX,
   7557 			    ("%s: TX: got TXDW interrupt\n",
   7558 			    device_xname(sc->sc_dev)));
   7559 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7560 		}
   7561 #endif
   7562 		wm_txeof(sc);
   7563 
   7564 		WM_TX_UNLOCK(txq);
   7565 		WM_CORE_LOCK(sc);
   7566 
   7567 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7568 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7569 			wm_linkintr(sc, icr);
   7570 		}
   7571 
   7572 		WM_CORE_UNLOCK(sc);
   7573 
   7574 		if (icr & ICR_RXO) {
   7575 #if defined(WM_DEBUG)
   7576 			log(LOG_WARNING, "%s: Receive overrun\n",
   7577 			    device_xname(sc->sc_dev));
   7578 #endif /* defined(WM_DEBUG) */
   7579 		}
   7580 	}
   7581 
   7582 	rnd_add_uint32(&sc->rnd_source, rndval);
   7583 
   7584 	if (handled) {
   7585 		/* Try to get more packets going. */
   7586 		ifp->if_start(ifp);
   7587 	}
   7588 
   7589 	return handled;
   7590 }
   7591 
   7592 /*
   7593  * wm_txintr_msix:
   7594  *
   7595  *	Interrupt service routine for TX complete interrupt for MSI-X.
   7596  */
   7597 static int
   7598 wm_txintr_msix(void *arg)
   7599 {
   7600 	struct wm_txqueue *txq = arg;
   7601 	struct wm_softc *sc = txq->txq_sc;
   7602 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7603 	int handled = 0;
   7604 
   7605 	DPRINTF(WM_DEBUG_TX,
   7606 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7607 
   7608 	if (sc->sc_type == WM_T_82574)
   7609 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id));
   7610 	else if (sc->sc_type == WM_T_82575)
   7611 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
   7612 	else
   7613 		CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
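	/*
	 * The write above masks this queue's interrupt while we process
	 * completions (IMC on 82574, EIMC otherwise); the matching
	 * IMS/EIMS write below re-enables it, so the vector can't be
	 * re-raised while the Tx lock is held.
	 */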
   7614 
   7615 	WM_TX_LOCK(txq);
   7616 
   7617 	if (sc->sc_stopping)
   7618 		goto out;
   7619 
   7620 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7621 	handled = wm_txeof(sc);
   7622 
   7623 out:
   7624 	WM_TX_UNLOCK(txq);
   7625 
   7626 	if (sc->sc_type == WM_T_82574)
   7627 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id));
   7628 	else if (sc->sc_type == WM_T_82575)
   7629 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
   7630 	else
   7631 		CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
   7632 
   7633 	if (handled) {
   7634 		/* Try to get more packets going. */
   7635 		ifp->if_start(ifp);
   7636 	}
   7637 
   7638 	return handled;
   7639 }
   7640 
   7641 /*
   7642  * wm_rxintr_msix:
   7643  *
   7644  *	Interrupt service routine for RX interrupt for MSI-X.
   7645  */
   7646 static int
   7647 wm_rxintr_msix(void *arg)
   7648 {
   7649 	struct wm_rxqueue *rxq = arg;
   7650 	struct wm_softc *sc = rxq->rxq_sc;
   7651 
   7652 	DPRINTF(WM_DEBUG_RX,
   7653 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7654 
   7655 	if (sc->sc_type == WM_T_82574)
   7656 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id));
   7657 	else if (sc->sc_type == WM_T_82575)
   7658 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
   7659 	else
   7660 		CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
   7661 
   7662 	WM_RX_LOCK(rxq);
   7663 
   7664 	if (sc->sc_stopping)
   7665 		goto out;
   7666 
   7667 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7668 	wm_rxeof(rxq);
   7669 
   7670 out:
   7671 	WM_RX_UNLOCK(rxq);
   7672 
   7673 	if (sc->sc_type == WM_T_82574)
   7674 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
   7675 	else if (sc->sc_type == WM_T_82575)
   7676 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
   7677 	else
   7678 		CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
   7679 
   7680 	return 1;
   7681 }
   7682 
   7683 /*
   7684  * wm_linkintr_msix:
   7685  *
   7686  *	Interrupt service routine for link status change for MSI-X.
   7687  */
   7688 static int
   7689 wm_linkintr_msix(void *arg)
   7690 {
   7691 	struct wm_softc *sc = arg;
   7692 	uint32_t reg;
   7693 
   7694 	DPRINTF(WM_DEBUG_LINK,
   7695 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7696 
   7697 	reg = CSR_READ(sc, WMREG_ICR);
   7698 	WM_CORE_LOCK(sc);
   7699 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7700 		goto out;
   7701 
   7702 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7703 	wm_linkintr(sc, ICR_LSC);
   7704 
   7705 out:
   7706 	WM_CORE_UNLOCK(sc);
   7707 
   7708 	if (sc->sc_type == WM_T_82574)
   7709 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7710 	else if (sc->sc_type == WM_T_82575)
   7711 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7712 	else
   7713 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7714 
   7715 	return 1;
   7716 }
   7717 
   7718 /*
   7719  * Media related.
   7720  * GMII, SGMII, TBI (and SERDES)
   7721  */
   7722 
   7723 /* Common */
   7724 
   7725 /*
   7726  * wm_tbi_serdes_set_linkled:
   7727  *
   7728  *	Update the link LED on TBI and SERDES devices.
   7729  */
   7730 static void
   7731 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7732 {
   7733 
   7734 	if (sc->sc_tbi_linkup)
   7735 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7736 	else
   7737 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7738 
   7739 	/* 82540 or newer devices are active low */
   7740 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
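	/*
	 * The XOR above inverts the driven level on 82540 and newer,
	 * whose link LED is active low; link up therefore ends up driving
	 * SWDPIN(0) low on those chips.
	 */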
   7741 
   7742 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7743 }
   7744 
   7745 /* GMII related */
   7746 
   7747 /*
   7748  * wm_gmii_reset:
   7749  *
   7750  *	Reset the PHY.
   7751  */
   7752 static void
   7753 wm_gmii_reset(struct wm_softc *sc)
   7754 {
   7755 	uint32_t reg;
   7756 	int rv;
   7757 
   7758 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7759 		device_xname(sc->sc_dev), __func__));
   7760 	/* get phy semaphore */
   7761 	switch (sc->sc_type) {
   7762 	case WM_T_82571:
   7763 	case WM_T_82572:
   7764 	case WM_T_82573:
   7765 	case WM_T_82574:
   7766 	case WM_T_82583:
   7767 		 /* XXX should get sw semaphore, too */
   7768 		rv = wm_get_swsm_semaphore(sc);
   7769 		break;
   7770 	case WM_T_82575:
   7771 	case WM_T_82576:
   7772 	case WM_T_82580:
   7773 	case WM_T_I350:
   7774 	case WM_T_I354:
   7775 	case WM_T_I210:
   7776 	case WM_T_I211:
   7777 	case WM_T_80003:
   7778 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7779 		break;
   7780 	case WM_T_ICH8:
   7781 	case WM_T_ICH9:
   7782 	case WM_T_ICH10:
   7783 	case WM_T_PCH:
   7784 	case WM_T_PCH2:
   7785 	case WM_T_PCH_LPT:
   7786 	case WM_T_PCH_SPT:
   7787 		rv = wm_get_swfwhw_semaphore(sc);
   7788 		break;
   7789 	default:
    7790 		/* nothing to do */
   7791 		rv = 0;
   7792 		break;
   7793 	}
   7794 	if (rv != 0) {
   7795 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7796 		    __func__);
   7797 		return;
   7798 	}
   7799 
   7800 	switch (sc->sc_type) {
   7801 	case WM_T_82542_2_0:
   7802 	case WM_T_82542_2_1:
   7803 		/* null */
   7804 		break;
   7805 	case WM_T_82543:
   7806 		/*
   7807 		 * With 82543, we need to force speed and duplex on the MAC
   7808 		 * equal to what the PHY speed and duplex configuration is.
   7809 		 * In addition, we need to perform a hardware reset on the PHY
   7810 		 * to take it out of reset.
   7811 		 */
   7812 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7813 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7814 
   7815 		/* The PHY reset pin is active-low. */
   7816 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7817 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7818 		    CTRL_EXT_SWDPIN(4));
   7819 		reg |= CTRL_EXT_SWDPIO(4);
   7820 
   7821 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7822 		CSR_WRITE_FLUSH(sc);
   7823 		delay(10*1000);
   7824 
   7825 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7826 		CSR_WRITE_FLUSH(sc);
   7827 		delay(150);
   7828 #if 0
   7829 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7830 #endif
   7831 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7832 		break;
   7833 	case WM_T_82544:	/* reset 10000us */
   7834 	case WM_T_82540:
   7835 	case WM_T_82545:
   7836 	case WM_T_82545_3:
   7837 	case WM_T_82546:
   7838 	case WM_T_82546_3:
   7839 	case WM_T_82541:
   7840 	case WM_T_82541_2:
   7841 	case WM_T_82547:
   7842 	case WM_T_82547_2:
   7843 	case WM_T_82571:	/* reset 100us */
   7844 	case WM_T_82572:
   7845 	case WM_T_82573:
   7846 	case WM_T_82574:
   7847 	case WM_T_82575:
   7848 	case WM_T_82576:
   7849 	case WM_T_82580:
   7850 	case WM_T_I350:
   7851 	case WM_T_I354:
   7852 	case WM_T_I210:
   7853 	case WM_T_I211:
   7854 	case WM_T_82583:
   7855 	case WM_T_80003:
   7856 		/* generic reset */
   7857 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7858 		CSR_WRITE_FLUSH(sc);
   7859 		delay(20000);
   7860 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7861 		CSR_WRITE_FLUSH(sc);
   7862 		delay(20000);
   7863 
   7864 		if ((sc->sc_type == WM_T_82541)
   7865 		    || (sc->sc_type == WM_T_82541_2)
   7866 		    || (sc->sc_type == WM_T_82547)
   7867 		    || (sc->sc_type == WM_T_82547_2)) {
    7868 			/* workarounds for igp are done in igp_reset() */
   7869 			/* XXX add code to set LED after phy reset */
   7870 		}
   7871 		break;
   7872 	case WM_T_ICH8:
   7873 	case WM_T_ICH9:
   7874 	case WM_T_ICH10:
   7875 	case WM_T_PCH:
   7876 	case WM_T_PCH2:
   7877 	case WM_T_PCH_LPT:
   7878 	case WM_T_PCH_SPT:
   7879 		/* generic reset */
   7880 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7881 		CSR_WRITE_FLUSH(sc);
   7882 		delay(100);
   7883 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7884 		CSR_WRITE_FLUSH(sc);
   7885 		delay(150);
   7886 		break;
   7887 	default:
   7888 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7889 		    __func__);
   7890 		break;
   7891 	}
   7892 
   7893 	/* release PHY semaphore */
   7894 	switch (sc->sc_type) {
   7895 	case WM_T_82571:
   7896 	case WM_T_82572:
   7897 	case WM_T_82573:
   7898 	case WM_T_82574:
   7899 	case WM_T_82583:
   7900 		 /* XXX should put sw semaphore, too */
   7901 		wm_put_swsm_semaphore(sc);
   7902 		break;
   7903 	case WM_T_82575:
   7904 	case WM_T_82576:
   7905 	case WM_T_82580:
   7906 	case WM_T_I350:
   7907 	case WM_T_I354:
   7908 	case WM_T_I210:
   7909 	case WM_T_I211:
   7910 	case WM_T_80003:
   7911 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7912 		break;
   7913 	case WM_T_ICH8:
   7914 	case WM_T_ICH9:
   7915 	case WM_T_ICH10:
   7916 	case WM_T_PCH:
   7917 	case WM_T_PCH2:
   7918 	case WM_T_PCH_LPT:
   7919 	case WM_T_PCH_SPT:
   7920 		wm_put_swfwhw_semaphore(sc);
   7921 		break;
   7922 	default:
   7923 		/* nothing to do */
   7924 		rv = 0;
   7925 		break;
   7926 	}
   7927 
   7928 	/* get_cfg_done */
   7929 	wm_get_cfg_done(sc);
   7930 
   7931 	/* extra setup */
   7932 	switch (sc->sc_type) {
   7933 	case WM_T_82542_2_0:
   7934 	case WM_T_82542_2_1:
   7935 	case WM_T_82543:
   7936 	case WM_T_82544:
   7937 	case WM_T_82540:
   7938 	case WM_T_82545:
   7939 	case WM_T_82545_3:
   7940 	case WM_T_82546:
   7941 	case WM_T_82546_3:
   7942 	case WM_T_82541_2:
   7943 	case WM_T_82547_2:
   7944 	case WM_T_82571:
   7945 	case WM_T_82572:
   7946 	case WM_T_82573:
   7947 	case WM_T_82575:
   7948 	case WM_T_82576:
   7949 	case WM_T_82580:
   7950 	case WM_T_I350:
   7951 	case WM_T_I354:
   7952 	case WM_T_I210:
   7953 	case WM_T_I211:
   7954 	case WM_T_80003:
   7955 		/* null */
   7956 		break;
   7957 	case WM_T_82574:
   7958 	case WM_T_82583:
   7959 		wm_lplu_d0_disable(sc);
   7960 		break;
   7961 	case WM_T_82541:
   7962 	case WM_T_82547:
    7963 		/* XXX Configure activity LED after PHY reset */
   7964 		break;
   7965 	case WM_T_ICH8:
   7966 	case WM_T_ICH9:
   7967 	case WM_T_ICH10:
   7968 	case WM_T_PCH:
   7969 	case WM_T_PCH2:
   7970 	case WM_T_PCH_LPT:
   7971 	case WM_T_PCH_SPT:
    7972 		/* Allow time for h/w to get to a quiescent state after reset */
   7973 		delay(10*1000);
   7974 
   7975 		if (sc->sc_type == WM_T_PCH)
   7976 			wm_hv_phy_workaround_ich8lan(sc);
   7977 
   7978 		if (sc->sc_type == WM_T_PCH2)
   7979 			wm_lv_phy_workaround_ich8lan(sc);
   7980 
   7981 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7982 			/*
   7983 			 * dummy read to clear the phy wakeup bit after lcd
   7984 			 * reset
   7985 			 */
   7986 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7987 		}
   7988 
   7989 		/*
   7990 		 * XXX Configure the LCD with th extended configuration region
    7991 		 * XXX Configure the LCD with the extended configuration region
   7992 		 */
   7993 
   7994 		/* Disable D0 LPLU. */
   7995 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   7996 			wm_lplu_d0_disable_pch(sc);
   7997 		else
   7998 			wm_lplu_d0_disable(sc);	/* ICH* */
   7999 		break;
   8000 	default:
   8001 		panic("%s: unknown type\n", __func__);
   8002 		break;
   8003 	}
   8004 }
   8005 
   8006 /*
   8007  * wm_get_phy_id_82575:
   8008  *
   8009  * Return PHY ID. Return -1 if it failed.
   8010  */
   8011 static int
   8012 wm_get_phy_id_82575(struct wm_softc *sc)
   8013 {
   8014 	uint32_t reg;
   8015 	int phyid = -1;
   8016 
   8017 	/* XXX */
   8018 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8019 		return -1;
   8020 
   8021 	if (wm_sgmii_uses_mdio(sc)) {
   8022 		switch (sc->sc_type) {
   8023 		case WM_T_82575:
   8024 		case WM_T_82576:
   8025 			reg = CSR_READ(sc, WMREG_MDIC);
   8026 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8027 			break;
   8028 		case WM_T_82580:
   8029 		case WM_T_I350:
   8030 		case WM_T_I354:
   8031 		case WM_T_I210:
   8032 		case WM_T_I211:
   8033 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8034 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8035 			break;
   8036 		default:
   8037 			return -1;
   8038 		}
   8039 	}
   8040 
   8041 	return phyid;
   8042 }
    8043 
   8045 /*
   8046  * wm_gmii_mediainit:
   8047  *
   8048  *	Initialize media for use on 1000BASE-T devices.
   8049  */
   8050 static void
   8051 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8052 {
   8053 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8054 	struct mii_data *mii = &sc->sc_mii;
   8055 	uint32_t reg;
   8056 
   8057 	/* We have GMII. */
   8058 	sc->sc_flags |= WM_F_HAS_MII;
   8059 
   8060 	if (sc->sc_type == WM_T_80003)
   8061 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8062 	else
   8063 		sc->sc_tipg = TIPG_1000T_DFLT;
   8064 
   8065 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8066 	if ((sc->sc_type == WM_T_82580)
   8067 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8068 	    || (sc->sc_type == WM_T_I211)) {
   8069 		reg = CSR_READ(sc, WMREG_PHPM);
   8070 		reg &= ~PHPM_GO_LINK_D;
   8071 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8072 	}
   8073 
   8074 	/*
   8075 	 * Let the chip set speed/duplex on its own based on
   8076 	 * signals from the PHY.
   8077 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8078 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8079 	 */
   8080 	sc->sc_ctrl |= CTRL_SLU;
   8081 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8082 
   8083 	/* Initialize our media structures and probe the GMII. */
   8084 	mii->mii_ifp = ifp;
   8085 
   8086 	/*
   8087 	 * Determine the PHY access method.
   8088 	 *
    8089 	 *  For SGMII, use the SGMII specific method.
    8090 	 *
    8091 	 *  For some devices, we can determine the PHY access method
    8092 	 * from sc_type.
    8093 	 *
    8094 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8095 	 * access method from sc_type alone, so use the PCI product ID for
    8096 	 * some devices.
    8097 	 * For other ICH8 variants, try igp's method first.  If the PHY
    8098 	 * can't be detected, fall back to bm's method.
   8099 	 */
   8100 	switch (prodid) {
   8101 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8102 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8103 		/* 82577 */
   8104 		sc->sc_phytype = WMPHY_82577;
   8105 		break;
   8106 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8107 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8108 		/* 82578 */
   8109 		sc->sc_phytype = WMPHY_82578;
   8110 		break;
   8111 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8112 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8113 		/* 82579 */
   8114 		sc->sc_phytype = WMPHY_82579;
   8115 		break;
   8116 	case PCI_PRODUCT_INTEL_82801I_BM:
   8117 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8118 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8119 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8120 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8121 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8122 		/* 82567 */
   8123 		sc->sc_phytype = WMPHY_BM;
   8124 		mii->mii_readreg = wm_gmii_bm_readreg;
   8125 		mii->mii_writereg = wm_gmii_bm_writereg;
   8126 		break;
   8127 	default:
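         		/*
         		 * The checks below depend on the ordering of the
         		 * WM_T_* constants; they are made from the largest
         		 * value down.
         		 */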
   8128 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    8129 		    && !wm_sgmii_uses_mdio(sc)) {
   8130 			/* SGMII */
   8131 			mii->mii_readreg = wm_sgmii_readreg;
   8132 			mii->mii_writereg = wm_sgmii_writereg;
   8133 		} else if (sc->sc_type >= WM_T_80003) {
   8134 			/* 80003 */
   8135 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8136 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8137 		} else if (sc->sc_type >= WM_T_I210) {
   8138 			/* I210 and I211 */
   8139 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8140 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8141 		} else if (sc->sc_type >= WM_T_82580) {
   8142 			/* 82580, I350 and I354 */
   8143 			sc->sc_phytype = WMPHY_82580;
   8144 			mii->mii_readreg = wm_gmii_82580_readreg;
   8145 			mii->mii_writereg = wm_gmii_82580_writereg;
   8146 		} else if (sc->sc_type >= WM_T_82544) {
    8147 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8148 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8149 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8150 		} else {
   8151 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8152 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8153 		}
   8154 		break;
   8155 	}
   8156 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8157 		/* All PCH* use _hv_ */
   8158 		mii->mii_readreg = wm_gmii_hv_readreg;
   8159 		mii->mii_writereg = wm_gmii_hv_writereg;
   8160 	}
   8161 	mii->mii_statchg = wm_gmii_statchg;
   8162 
   8163 	wm_gmii_reset(sc);
   8164 
   8165 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8166 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8167 	    wm_gmii_mediastatus);
   8168 
   8169 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8170 	    || (sc->sc_type == WM_T_82580)
   8171 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8172 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8173 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8174 			/* Attach only one port */
   8175 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8176 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8177 		} else {
   8178 			int i, id;
   8179 			uint32_t ctrl_ext;
   8180 
   8181 			id = wm_get_phy_id_82575(sc);
   8182 			if (id != -1) {
   8183 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8184 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8185 			}
   8186 			if ((id == -1)
   8187 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8188 				/* Power on sgmii phy if it is disabled */
   8189 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8190 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8191 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8192 				CSR_WRITE_FLUSH(sc);
   8193 				delay(300*1000); /* XXX too long */
   8194 
    8195 				/* Try PHY addresses 1 through 7 */
   8196 				for (i = 1; i < 8; i++)
   8197 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8198 					    0xffffffff, i, MII_OFFSET_ANY,
   8199 					    MIIF_DOPAUSE);
   8200 
   8201 				/* restore previous sfp cage power state */
   8202 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8203 			}
   8204 		}
   8205 	} else {
   8206 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8207 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8208 	}
   8209 
   8210 	/*
   8211 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8212 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8213 	 */
   8214 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8215 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8216 		wm_set_mdio_slow_mode_hv(sc);
   8217 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8218 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8219 	}
   8220 
   8221 	/*
   8222 	 * (For ICH8 variants)
   8223 	 * If PHY detection failed, use BM's r/w function and retry.
   8224 	 */
   8225 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8226 		/* if failed, retry with *_bm_* */
   8227 		mii->mii_readreg = wm_gmii_bm_readreg;
   8228 		mii->mii_writereg = wm_gmii_bm_writereg;
   8229 
   8230 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8231 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8232 	}
   8233 
   8234 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8235 		/* No PHY was found */
   8236 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8237 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8238 		sc->sc_phytype = WMPHY_NONE;
   8239 	} else {
   8240 		/*
   8241 		 * PHY Found!
   8242 		 * Check PHY type.
   8243 		 */
   8244 		uint32_t model;
   8245 		struct mii_softc *child;
   8246 
   8247 		child = LIST_FIRST(&mii->mii_phys);
   8248 		model = child->mii_mpd_model;
   8249 		if (model == MII_MODEL_yyINTEL_I82566)
   8250 			sc->sc_phytype = WMPHY_IGP_3;
   8251 
   8252 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8253 	}
   8254 }
   8255 
   8256 /*
   8257  * wm_gmii_mediachange:	[ifmedia interface function]
   8258  *
   8259  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8260  */
   8261 static int
   8262 wm_gmii_mediachange(struct ifnet *ifp)
   8263 {
   8264 	struct wm_softc *sc = ifp->if_softc;
   8265 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8266 	int rc;
   8267 
   8268 	if ((ifp->if_flags & IFF_UP) == 0)
   8269 		return 0;
   8270 
   8271 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8272 	sc->sc_ctrl |= CTRL_SLU;
   8273 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8274 	    || (sc->sc_type > WM_T_82543)) {
   8275 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8276 	} else {
   8277 		sc->sc_ctrl &= ~CTRL_ASDE;
   8278 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8279 		if (ife->ifm_media & IFM_FDX)
   8280 			sc->sc_ctrl |= CTRL_FD;
   8281 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8282 		case IFM_10_T:
   8283 			sc->sc_ctrl |= CTRL_SPEED_10;
   8284 			break;
   8285 		case IFM_100_TX:
   8286 			sc->sc_ctrl |= CTRL_SPEED_100;
   8287 			break;
   8288 		case IFM_1000_T:
   8289 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8290 			break;
   8291 		default:
   8292 			panic("wm_gmii_mediachange: bad media 0x%x",
   8293 			    ife->ifm_media);
   8294 		}
   8295 	}
   8296 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8297 	if (sc->sc_type <= WM_T_82543)
   8298 		wm_gmii_reset(sc);
   8299 
   8300 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8301 		return 0;
   8302 	return rc;
   8303 }
   8304 
   8305 /*
   8306  * wm_gmii_mediastatus:	[ifmedia interface function]
   8307  *
   8308  *	Get the current interface media status on a 1000BASE-T device.
   8309  */
   8310 static void
   8311 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8312 {
   8313 	struct wm_softc *sc = ifp->if_softc;
   8314 
   8315 	ether_mediastatus(ifp, ifmr);
   8316 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8317 	    | sc->sc_flowflags;
   8318 }
   8319 
   8320 #define	MDI_IO		CTRL_SWDPIN(2)
   8321 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8322 #define	MDI_CLK		CTRL_SWDPIN(3)
   8323 
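         /*
          * wm_i82543_mii_sendbits:
          *
          *	Clock a value out to the PHY MSB-first over the bit-banged
          *	MDIO interface of the i82543: each bit is placed on MDI_IO
          *	and latched by pulsing MDI_CLK, with roughly 10us of settling
          *	time around each clock edge.
          */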
   8324 static void
   8325 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8326 {
   8327 	uint32_t i, v;
   8328 
   8329 	v = CSR_READ(sc, WMREG_CTRL);
   8330 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8331 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8332 
   8333 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8334 		if (data & i)
   8335 			v |= MDI_IO;
   8336 		else
   8337 			v &= ~MDI_IO;
   8338 		CSR_WRITE(sc, WMREG_CTRL, v);
   8339 		CSR_WRITE_FLUSH(sc);
   8340 		delay(10);
   8341 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8342 		CSR_WRITE_FLUSH(sc);
   8343 		delay(10);
   8344 		CSR_WRITE(sc, WMREG_CTRL, v);
   8345 		CSR_WRITE_FLUSH(sc);
   8346 		delay(10);
   8347 	}
   8348 }
   8349 
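         /*
          * wm_i82543_mii_recvbits:
          *
          *	Clock a 16-bit value in from the PHY MSB-first, including the
          *	idle/turnaround clock cycles before and after the data word.
          */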
   8350 static uint32_t
   8351 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8352 {
   8353 	uint32_t v, i, data = 0;
   8354 
   8355 	v = CSR_READ(sc, WMREG_CTRL);
   8356 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8357 	v |= CTRL_SWDPIO(3);
   8358 
   8359 	CSR_WRITE(sc, WMREG_CTRL, v);
   8360 	CSR_WRITE_FLUSH(sc);
   8361 	delay(10);
   8362 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8363 	CSR_WRITE_FLUSH(sc);
   8364 	delay(10);
   8365 	CSR_WRITE(sc, WMREG_CTRL, v);
   8366 	CSR_WRITE_FLUSH(sc);
   8367 	delay(10);
   8368 
   8369 	for (i = 0; i < 16; i++) {
   8370 		data <<= 1;
   8371 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8372 		CSR_WRITE_FLUSH(sc);
   8373 		delay(10);
   8374 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8375 			data |= 1;
   8376 		CSR_WRITE(sc, WMREG_CTRL, v);
   8377 		CSR_WRITE_FLUSH(sc);
   8378 		delay(10);
   8379 	}
   8380 
   8381 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8382 	CSR_WRITE_FLUSH(sc);
   8383 	delay(10);
   8384 	CSR_WRITE(sc, WMREG_CTRL, v);
   8385 	CSR_WRITE_FLUSH(sc);
   8386 	delay(10);
   8387 
   8388 	return data;
   8389 }
   8390 
   8391 #undef MDI_IO
   8392 #undef MDI_DIR
   8393 #undef MDI_CLK
   8394 
   8395 /*
   8396  * wm_gmii_i82543_readreg:	[mii interface function]
   8397  *
   8398  *	Read a PHY register on the GMII (i82543 version).
   8399  */
   8400 static int
   8401 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8402 {
   8403 	struct wm_softc *sc = device_private(self);
   8404 	int rv;
   8405 
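         	/* 32 bits of preamble, then the 14-bit read command frame */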
   8406 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8407 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8408 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8409 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8410 
   8411 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8412 	    device_xname(sc->sc_dev), phy, reg, rv));
   8413 
   8414 	return rv;
   8415 }
   8416 
   8417 /*
   8418  * wm_gmii_i82543_writereg:	[mii interface function]
   8419  *
   8420  *	Write a PHY register on the GMII (i82543 version).
   8421  */
   8422 static void
   8423 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8424 {
   8425 	struct wm_softc *sc = device_private(self);
   8426 
   8427 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8428 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8429 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8430 	    (MII_COMMAND_START << 30), 32);
   8431 }
   8432 
   8433 /*
   8434  * wm_gmii_i82544_readreg:	[mii interface function]
   8435  *
   8436  *	Read a PHY register on the GMII.
   8437  */
   8438 static int
   8439 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8440 {
   8441 	struct wm_softc *sc = device_private(self);
   8442 	uint32_t mdic = 0;
   8443 	int i, rv;
   8444 
   8445 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8446 	    MDIC_REGADD(reg));
   8447 
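         	/*
         	 * Poll for completion; the wait is bounded by roughly
         	 * 50us * WM_GEN_POLL_TIMEOUT * 3 in total.
         	 */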
   8448 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8449 		mdic = CSR_READ(sc, WMREG_MDIC);
   8450 		if (mdic & MDIC_READY)
   8451 			break;
   8452 		delay(50);
   8453 	}
   8454 
   8455 	if ((mdic & MDIC_READY) == 0) {
   8456 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8457 		    device_xname(sc->sc_dev), phy, reg);
   8458 		rv = 0;
   8459 	} else if (mdic & MDIC_E) {
   8460 #if 0 /* This is normal if no PHY is present. */
   8461 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8462 		    device_xname(sc->sc_dev), phy, reg);
   8463 #endif
   8464 		rv = 0;
   8465 	} else {
   8466 		rv = MDIC_DATA(mdic);
   8467 		if (rv == 0xffff)
   8468 			rv = 0;
   8469 	}
   8470 
   8471 	return rv;
   8472 }
   8473 
   8474 /*
   8475  * wm_gmii_i82544_writereg:	[mii interface function]
   8476  *
   8477  *	Write a PHY register on the GMII.
   8478  */
   8479 static void
   8480 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8481 {
   8482 	struct wm_softc *sc = device_private(self);
   8483 	uint32_t mdic = 0;
   8484 	int i;
   8485 
   8486 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8487 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8488 
   8489 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8490 		mdic = CSR_READ(sc, WMREG_MDIC);
   8491 		if (mdic & MDIC_READY)
   8492 			break;
   8493 		delay(50);
   8494 	}
   8495 
   8496 	if ((mdic & MDIC_READY) == 0)
   8497 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8498 		    device_xname(sc->sc_dev), phy, reg);
   8499 	else if (mdic & MDIC_E)
   8500 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8501 		    device_xname(sc->sc_dev), phy, reg);
   8502 }
   8503 
   8504 /*
   8505  * wm_gmii_i80003_readreg:	[mii interface function]
   8506  *
    8507  *	Read a PHY register on the kumeran.
    8508  * This could be handled by the PHY layer if we didn't have to lock the
    8509  * resource ...
   8510  */
   8511 static int
   8512 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8513 {
   8514 	struct wm_softc *sc = device_private(self);
   8515 	int sem;
   8516 	int rv;
   8517 
   8518 	if (phy != 1) /* only one PHY on kumeran bus */
   8519 		return 0;
   8520 
   8521 	sem = swfwphysem[sc->sc_funcid];
   8522 	if (wm_get_swfw_semaphore(sc, sem)) {
   8523 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8524 		    __func__);
   8525 		return 0;
   8526 	}
   8527 
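         	/*
         	 * Select the page: register offsets below GG82563_MIN_ALT_REG
         	 * go through the standard page select register, higher offsets
         	 * through the alternate one.
         	 */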
   8528 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8529 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8530 		    reg >> GG82563_PAGE_SHIFT);
   8531 	} else {
   8532 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8533 		    reg >> GG82563_PAGE_SHIFT);
   8534 	}
    8535 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8536 	delay(200);
   8537 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8538 	delay(200);
   8539 
   8540 	wm_put_swfw_semaphore(sc, sem);
   8541 	return rv;
   8542 }
   8543 
   8544 /*
   8545  * wm_gmii_i80003_writereg:	[mii interface function]
   8546  *
   8547  *	Write a PHY register on the kumeran.
   8548  * This could be handled by the PHY layer if we didn't have to lock the
    8549  * resource ...
   8550  */
   8551 static void
   8552 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8553 {
   8554 	struct wm_softc *sc = device_private(self);
   8555 	int sem;
   8556 
   8557 	if (phy != 1) /* only one PHY on kumeran bus */
   8558 		return;
   8559 
   8560 	sem = swfwphysem[sc->sc_funcid];
   8561 	if (wm_get_swfw_semaphore(sc, sem)) {
   8562 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8563 		    __func__);
   8564 		return;
   8565 	}
   8566 
   8567 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8568 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8569 		    reg >> GG82563_PAGE_SHIFT);
   8570 	} else {
   8571 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8572 		    reg >> GG82563_PAGE_SHIFT);
   8573 	}
    8574 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8575 	delay(200);
   8576 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8577 	delay(200);
   8578 
   8579 	wm_put_swfw_semaphore(sc, sem);
   8580 }
   8581 
   8582 /*
   8583  * wm_gmii_bm_readreg:	[mii interface function]
   8584  *
    8585  *	Read a PHY register on the BM PHY (82567 and other ICH8 variants).
    8586  * This could be handled by the PHY layer if we didn't have to lock the
    8587  * resource ...
   8588  */
   8589 static int
   8590 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8591 {
   8592 	struct wm_softc *sc = device_private(self);
   8593 	int sem;
   8594 	int rv;
   8595 
   8596 	sem = swfwphysem[sc->sc_funcid];
   8597 	if (wm_get_swfw_semaphore(sc, sem)) {
   8598 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8599 		    __func__);
   8600 		return 0;
   8601 	}
   8602 
   8603 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8604 		if (phy == 1)
   8605 			wm_gmii_i82544_writereg(self, phy,
   8606 			    MII_IGPHY_PAGE_SELECT, reg);
   8607 		else
   8608 			wm_gmii_i82544_writereg(self, phy,
   8609 			    GG82563_PHY_PAGE_SELECT,
   8610 			    reg >> GG82563_PAGE_SHIFT);
   8611 	}
   8612 
   8613 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8614 	wm_put_swfw_semaphore(sc, sem);
   8615 	return rv;
   8616 }
   8617 
   8618 /*
   8619  * wm_gmii_bm_writereg:	[mii interface function]
   8620  *
    8621  *	Write a PHY register on the BM PHY (82567 and other ICH8 variants).
    8622  * This could be handled by the PHY layer if we didn't have to lock the
    8623  * resource ...
   8624  */
   8625 static void
   8626 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8627 {
   8628 	struct wm_softc *sc = device_private(self);
   8629 	int sem;
   8630 
   8631 	sem = swfwphysem[sc->sc_funcid];
   8632 	if (wm_get_swfw_semaphore(sc, sem)) {
   8633 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8634 		    __func__);
   8635 		return;
   8636 	}
   8637 
   8638 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8639 		if (phy == 1)
   8640 			wm_gmii_i82544_writereg(self, phy,
   8641 			    MII_IGPHY_PAGE_SELECT, reg);
   8642 		else
   8643 			wm_gmii_i82544_writereg(self, phy,
   8644 			    GG82563_PHY_PAGE_SELECT,
   8645 			    reg >> GG82563_PAGE_SHIFT);
   8646 	}
   8647 
   8648 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8649 	wm_put_swfw_semaphore(sc, sem);
   8650 }
   8651 
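         /*
          * wm_access_phy_wakeup_reg_bm:
          *
          *	Read or write a BM PHY wakeup register.  The wakeup registers
          *	live on page 800 (BM_WUC_PAGE) and are reached indirectly:
          *	enable wakeup register access from page 769, write the register
          *	number to the address opcode register, access the data opcode
          *	register, then restore the page 769 enable bits.
          */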
   8652 static void
   8653 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8654 {
   8655 	struct wm_softc *sc = device_private(self);
   8656 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8657 	uint16_t wuce;
   8658 
   8659 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8660 	if (sc->sc_type == WM_T_PCH) {
    8661 		/* XXX the e1000 driver does nothing here... why? */
   8662 	}
   8663 
   8664 	/* Set page 769 */
   8665 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8666 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8667 
   8668 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8669 
   8670 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8671 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8672 	    wuce | BM_WUC_ENABLE_BIT);
   8673 
   8674 	/* Select page 800 */
   8675 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8676 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8677 
   8678 	/* Write page 800 */
   8679 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8680 
   8681 	if (rd)
   8682 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8683 	else
   8684 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8685 
   8686 	/* Set page 769 */
   8687 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8688 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8689 
   8690 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8691 }
   8692 
   8693 /*
   8694  * wm_gmii_hv_readreg:	[mii interface function]
   8695  *
    8696  *	Read a PHY register on the HV PHY (PCH and newer).
    8697  * This could be handled by the PHY layer if we didn't have to lock the
    8698  * resource ...
   8699  */
   8700 static int
   8701 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8702 {
   8703 	struct wm_softc *sc = device_private(self);
   8704 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8705 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8706 	uint16_t val;
   8707 	int rv;
   8708 
   8709 	if (wm_get_swfwhw_semaphore(sc)) {
   8710 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8711 		    __func__);
   8712 		return 0;
   8713 	}
   8714 
   8715 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8716 	if (sc->sc_phytype == WMPHY_82577) {
   8717 		/* XXX must write */
   8718 	}
   8719 
   8720 	/* Page 800 works differently than the rest so it has its own func */
   8721 	if (page == BM_WUC_PAGE) {
    8722 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
         		wm_put_swfwhw_semaphore(sc);
    8723 		return val;
   8724 	}
   8725 
   8726 	/*
    8727 	 * Pages lower than 768 work differently than the rest, so they
    8728 	 * have their own func
    8729 	 */
    8730 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8731 		printf("gmii_hv_readreg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8732 		return 0;
   8733 	}
   8734 
   8735 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8736 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8737 		    page << BME1000_PAGE_SHIFT);
   8738 	}
   8739 
   8740 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8741 	wm_put_swfwhw_semaphore(sc);
   8742 	return rv;
   8743 }
   8744 
   8745 /*
   8746  * wm_gmii_hv_writereg:	[mii interface function]
   8747  *
    8748  *	Write a PHY register on the HV PHY (PCH and newer).
    8749  * This could be handled by the PHY layer if we didn't have to lock the
    8750  * resource ...
   8751  */
   8752 static void
   8753 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8754 {
   8755 	struct wm_softc *sc = device_private(self);
   8756 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8757 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8758 
   8759 	if (wm_get_swfwhw_semaphore(sc)) {
   8760 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8761 		    __func__);
   8762 		return;
   8763 	}
   8764 
   8765 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8766 
   8767 	/* Page 800 works differently than the rest so it has its own func */
   8768 	if (page == BM_WUC_PAGE) {
   8769 		uint16_t tmp;
   8770 
   8771 		tmp = val;
    8772 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
         		wm_put_swfwhw_semaphore(sc);
    8773 		return;
   8774 	}
   8775 
   8776 	/*
    8777 	 * Pages lower than 768 work differently than the rest, so they
    8778 	 * have their own func
    8779 	 */
    8780 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8781 		printf("gmii_hv_writereg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8782 		return;
   8783 	}
   8784 
   8785 	/*
   8786 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8787 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8788 	 */
   8789 
   8790 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8791 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8792 		    page << BME1000_PAGE_SHIFT);
   8793 	}
   8794 
   8795 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8796 	wm_put_swfwhw_semaphore(sc);
   8797 }
   8798 
   8799 /*
   8800  * wm_gmii_82580_readreg:	[mii interface function]
   8801  *
   8802  *	Read a PHY register on the 82580 and I350.
   8803  * This could be handled by the PHY layer if we didn't have to lock the
    8804  * resource ...
   8805  */
   8806 static int
   8807 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8808 {
   8809 	struct wm_softc *sc = device_private(self);
   8810 	int sem;
   8811 	int rv;
   8812 
   8813 	sem = swfwphysem[sc->sc_funcid];
   8814 	if (wm_get_swfw_semaphore(sc, sem)) {
   8815 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8816 		    __func__);
   8817 		return 0;
   8818 	}
   8819 
   8820 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8821 
   8822 	wm_put_swfw_semaphore(sc, sem);
   8823 	return rv;
   8824 }
   8825 
   8826 /*
   8827  * wm_gmii_82580_writereg:	[mii interface function]
   8828  *
   8829  *	Write a PHY register on the 82580 and I350.
   8830  * This could be handled by the PHY layer if we didn't have to lock the
    8831  * resource ...
   8832  */
   8833 static void
   8834 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8835 {
   8836 	struct wm_softc *sc = device_private(self);
   8837 	int sem;
   8838 
   8839 	sem = swfwphysem[sc->sc_funcid];
   8840 	if (wm_get_swfw_semaphore(sc, sem)) {
   8841 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8842 		    __func__);
   8843 		return;
   8844 	}
   8845 
   8846 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8847 
   8848 	wm_put_swfw_semaphore(sc, sem);
   8849 }
   8850 
   8851 /*
   8852  * wm_gmii_gs40g_readreg:	[mii interface function]
   8853  *
    8854  *	Read a PHY register on the I210 and I211.
    8855  * This could be handled by the PHY layer if we didn't have to lock the
    8856  * resource ...
   8857  */
   8858 static int
   8859 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8860 {
   8861 	struct wm_softc *sc = device_private(self);
   8862 	int sem;
   8863 	int page, offset;
   8864 	int rv;
   8865 
   8866 	/* Acquire semaphore */
   8867 	sem = swfwphysem[sc->sc_funcid];
   8868 	if (wm_get_swfw_semaphore(sc, sem)) {
   8869 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8870 		    __func__);
   8871 		return 0;
   8872 	}
   8873 
   8874 	/* Page select */
   8875 	page = reg >> GS40G_PAGE_SHIFT;
   8876 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8877 
   8878 	/* Read reg */
   8879 	offset = reg & GS40G_OFFSET_MASK;
   8880 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8881 
   8882 	wm_put_swfw_semaphore(sc, sem);
   8883 	return rv;
   8884 }
   8885 
   8886 /*
   8887  * wm_gmii_gs40g_writereg:	[mii interface function]
   8888  *
   8889  *	Write a PHY register on the I210 and I211.
   8890  * This could be handled by the PHY layer if we didn't have to lock the
    8891  * resource ...
   8892  */
   8893 static void
   8894 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8895 {
   8896 	struct wm_softc *sc = device_private(self);
   8897 	int sem;
   8898 	int page, offset;
   8899 
   8900 	/* Acquire semaphore */
   8901 	sem = swfwphysem[sc->sc_funcid];
   8902 	if (wm_get_swfw_semaphore(sc, sem)) {
   8903 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8904 		    __func__);
   8905 		return;
   8906 	}
   8907 
   8908 	/* Page select */
   8909 	page = reg >> GS40G_PAGE_SHIFT;
   8910 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8911 
   8912 	/* Write reg */
   8913 	offset = reg & GS40G_OFFSET_MASK;
   8914 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8915 
   8916 	/* Release semaphore */
   8917 	wm_put_swfw_semaphore(sc, sem);
   8918 }
   8919 
   8920 /*
   8921  * wm_gmii_statchg:	[mii interface function]
   8922  *
   8923  *	Callback from MII layer when media changes.
   8924  */
   8925 static void
   8926 wm_gmii_statchg(struct ifnet *ifp)
   8927 {
   8928 	struct wm_softc *sc = ifp->if_softc;
   8929 	struct mii_data *mii = &sc->sc_mii;
   8930 
   8931 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8932 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8933 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8934 
   8935 	/*
   8936 	 * Get flow control negotiation result.
   8937 	 */
   8938 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8939 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8940 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8941 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8942 	}
   8943 
   8944 	if (sc->sc_flowflags & IFM_FLOW) {
   8945 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8946 			sc->sc_ctrl |= CTRL_TFCE;
   8947 			sc->sc_fcrtl |= FCRTL_XONE;
   8948 		}
   8949 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8950 			sc->sc_ctrl |= CTRL_RFCE;
   8951 	}
   8952 
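         	/* Set the collision distance for the negotiated duplex */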
   8953 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8954 		DPRINTF(WM_DEBUG_LINK,
   8955 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8956 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8957 	} else {
   8958 		DPRINTF(WM_DEBUG_LINK,
   8959 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8960 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8961 	}
   8962 
   8963 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8964 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8965 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8966 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8967 	if (sc->sc_type == WM_T_80003) {
   8968 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8969 		case IFM_1000_T:
   8970 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8971 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8972 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8973 			break;
   8974 		default:
   8975 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8976 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8977 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8978 			break;
   8979 		}
   8980 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8981 	}
   8982 }
   8983 
   8984 /*
   8985  * wm_kmrn_readreg:
   8986  *
   8987  *	Read a kumeran register
   8988  */
   8989 static int
   8990 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8991 {
   8992 	int rv;
   8993 
   8994 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8995 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8996 			aprint_error_dev(sc->sc_dev,
   8997 			    "%s: failed to get semaphore\n", __func__);
   8998 			return 0;
   8999 		}
   9000 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9001 		if (wm_get_swfwhw_semaphore(sc)) {
   9002 			aprint_error_dev(sc->sc_dev,
   9003 			    "%s: failed to get semaphore\n", __func__);
   9004 			return 0;
   9005 		}
   9006 	}
   9007 
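         	/* Issue the read: the register offset plus the read-enable bit */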
   9008 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9009 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9010 	    KUMCTRLSTA_REN);
   9011 	CSR_WRITE_FLUSH(sc);
   9012 	delay(2);
   9013 
   9014 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9015 
   9016 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9017 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9018 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9019 		wm_put_swfwhw_semaphore(sc);
   9020 
   9021 	return rv;
   9022 }
   9023 
   9024 /*
   9025  * wm_kmrn_writereg:
   9026  *
   9027  *	Write a kumeran register
   9028  */
   9029 static void
   9030 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9031 {
   9032 
   9033 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9034 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   9035 			aprint_error_dev(sc->sc_dev,
   9036 			    "%s: failed to get semaphore\n", __func__);
   9037 			return;
   9038 		}
   9039 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9040 		if (wm_get_swfwhw_semaphore(sc)) {
   9041 			aprint_error_dev(sc->sc_dev,
   9042 			    "%s: failed to get semaphore\n", __func__);
   9043 			return;
   9044 		}
   9045 	}
   9046 
   9047 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9048 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9049 	    (val & KUMCTRLSTA_MASK));
   9050 
   9051 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9052 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9053 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9054 		wm_put_swfwhw_semaphore(sc);
   9055 }
   9056 
   9057 /* SGMII related */
   9058 
   9059 /*
   9060  * wm_sgmii_uses_mdio
   9061  *
   9062  * Check whether the transaction is to the internal PHY or the external
   9063  * MDIO interface. Return true if it's MDIO.
   9064  */
   9065 static bool
   9066 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9067 {
   9068 	uint32_t reg;
   9069 	bool ismdio = false;
   9070 
   9071 	switch (sc->sc_type) {
   9072 	case WM_T_82575:
   9073 	case WM_T_82576:
   9074 		reg = CSR_READ(sc, WMREG_MDIC);
   9075 		ismdio = ((reg & MDIC_DEST) != 0);
   9076 		break;
   9077 	case WM_T_82580:
   9078 	case WM_T_I350:
   9079 	case WM_T_I354:
   9080 	case WM_T_I210:
   9081 	case WM_T_I211:
   9082 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9083 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9084 		break;
   9085 	default:
   9086 		break;
   9087 	}
   9088 
   9089 	return ismdio;
   9090 }
   9091 
   9092 /*
   9093  * wm_sgmii_readreg:	[mii interface function]
   9094  *
   9095  *	Read a PHY register on the SGMII
   9096  * This could be handled by the PHY layer if we didn't have to lock the
    9097  * resource ...
   9098  */
   9099 static int
   9100 wm_sgmii_readreg(device_t self, int phy, int reg)
   9101 {
   9102 	struct wm_softc *sc = device_private(self);
   9103 	uint32_t i2ccmd;
   9104 	int i, rv;
   9105 
   9106 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9107 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9108 		    __func__);
   9109 		return 0;
   9110 	}
   9111 
   9112 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9113 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9114 	    | I2CCMD_OPCODE_READ;
   9115 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9116 
   9117 	/* Poll the ready bit */
   9118 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9119 		delay(50);
   9120 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9121 		if (i2ccmd & I2CCMD_READY)
   9122 			break;
   9123 	}
   9124 	if ((i2ccmd & I2CCMD_READY) == 0)
   9125 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9126 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9127 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9128 
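         	/* Swap the data bytes back from I2C wire order */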
   9129 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9130 
   9131 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9132 	return rv;
   9133 }
   9134 
   9135 /*
   9136  * wm_sgmii_writereg:	[mii interface function]
   9137  *
   9138  *	Write a PHY register on the SGMII.
   9139  * This could be handled by the PHY layer if we didn't have to lock the
    9140  * resource ...
   9141  */
   9142 static void
   9143 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9144 {
   9145 	struct wm_softc *sc = device_private(self);
   9146 	uint32_t i2ccmd;
   9147 	int i;
   9148 	int val_swapped;
   9149 
   9150 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9151 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9152 		    __func__);
   9153 		return;
   9154 	}
   9155 	/* Swap the data bytes for the I2C interface */
   9156 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9157 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9158 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9159 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9160 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9161 
   9162 	/* Poll the ready bit */
   9163 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9164 		delay(50);
   9165 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9166 		if (i2ccmd & I2CCMD_READY)
   9167 			break;
   9168 	}
   9169 	if ((i2ccmd & I2CCMD_READY) == 0)
   9170 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9171 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9172 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9173 
    9174 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9175 }
   9176 
   9177 /* TBI related */
   9178 
   9179 /*
   9180  * wm_tbi_mediainit:
   9181  *
   9182  *	Initialize media for use on 1000BASE-X devices.
   9183  */
   9184 static void
   9185 wm_tbi_mediainit(struct wm_softc *sc)
   9186 {
   9187 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9188 	const char *sep = "";
   9189 
   9190 	if (sc->sc_type < WM_T_82543)
   9191 		sc->sc_tipg = TIPG_WM_DFLT;
   9192 	else
   9193 		sc->sc_tipg = TIPG_LG_DFLT;
   9194 
   9195 	sc->sc_tbi_serdes_anegticks = 5;
   9196 
   9197 	/* Initialize our media structures */
   9198 	sc->sc_mii.mii_ifp = ifp;
   9199 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9200 
   9201 	if ((sc->sc_type >= WM_T_82575)
   9202 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9203 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9204 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9205 	else
   9206 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9207 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9208 
   9209 	/*
   9210 	 * SWD Pins:
   9211 	 *
   9212 	 *	0 = Link LED (output)
   9213 	 *	1 = Loss Of Signal (input)
   9214 	 */
   9215 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9216 
   9217 	/* XXX Perhaps this is only for TBI */
   9218 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9219 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9220 
   9221 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9222 		sc->sc_ctrl &= ~CTRL_LRST;
   9223 
   9224 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9225 
   9226 #define	ADD(ss, mm, dd)							\
   9227 do {									\
   9228 	aprint_normal("%s%s", sep, ss);					\
   9229 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9230 	sep = ", ";							\
   9231 } while (/*CONSTCOND*/0)
   9232 
   9233 	aprint_normal_dev(sc->sc_dev, "");
   9234 
   9235 	/* Only 82545 is LX */
   9236 	if (sc->sc_type == WM_T_82545) {
   9237 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9238 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9239 	} else {
   9240 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9241 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9242 	}
   9243 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9244 	aprint_normal("\n");
   9245 
   9246 #undef ADD
   9247 
   9248 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9249 }
   9250 
   9251 /*
   9252  * wm_tbi_mediachange:	[ifmedia interface function]
   9253  *
   9254  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9255  */
   9256 static int
   9257 wm_tbi_mediachange(struct ifnet *ifp)
   9258 {
   9259 	struct wm_softc *sc = ifp->if_softc;
   9260 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9261 	uint32_t status;
   9262 	int i;
   9263 
   9264 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9265 		/* XXX need some work for >= 82571 and < 82575 */
   9266 		if (sc->sc_type < WM_T_82575)
   9267 			return 0;
   9268 	}
   9269 
   9270 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9271 	    || (sc->sc_type >= WM_T_82575))
   9272 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9273 
   9274 	sc->sc_ctrl &= ~CTRL_LRST;
   9275 	sc->sc_txcw = TXCW_ANE;
   9276 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9277 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9278 	else if (ife->ifm_media & IFM_FDX)
   9279 		sc->sc_txcw |= TXCW_FD;
   9280 	else
   9281 		sc->sc_txcw |= TXCW_HD;
   9282 
   9283 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9284 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9285 
   9286 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9287 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9288 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9289 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9290 	CSR_WRITE_FLUSH(sc);
   9291 	delay(1000);
   9292 
   9293 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9294 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9295 
   9296 	/*
    9297 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    9298 	 * if the optics detect a signal, and clear if they don't.
   9299 	 */
   9300 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9301 		/* Have signal; wait for the link to come up. */
   9302 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9303 			delay(10000);
   9304 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9305 				break;
   9306 		}
   9307 
   9308 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9309 			    device_xname(sc->sc_dev),i));
   9310 
   9311 		status = CSR_READ(sc, WMREG_STATUS);
   9312 		DPRINTF(WM_DEBUG_LINK,
   9313 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9314 			device_xname(sc->sc_dev),status, STATUS_LU));
   9315 		if (status & STATUS_LU) {
   9316 			/* Link is up. */
   9317 			DPRINTF(WM_DEBUG_LINK,
   9318 			    ("%s: LINK: set media -> link up %s\n",
   9319 			    device_xname(sc->sc_dev),
   9320 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9321 
   9322 			/*
    9323 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9324 			 * automatically, so we should re-read it into sc->sc_ctrl
   9325 			 */
   9326 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9327 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9328 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9329 			if (status & STATUS_FD)
   9330 				sc->sc_tctl |=
   9331 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9332 			else
   9333 				sc->sc_tctl |=
   9334 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9335 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9336 				sc->sc_fcrtl |= FCRTL_XONE;
   9337 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9338 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9339 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9340 				      sc->sc_fcrtl);
   9341 			sc->sc_tbi_linkup = 1;
   9342 		} else {
   9343 			if (i == WM_LINKUP_TIMEOUT)
   9344 				wm_check_for_link(sc);
   9345 			/* Link is down. */
   9346 			DPRINTF(WM_DEBUG_LINK,
   9347 			    ("%s: LINK: set media -> link down\n",
   9348 			    device_xname(sc->sc_dev)));
   9349 			sc->sc_tbi_linkup = 0;
   9350 		}
   9351 	} else {
   9352 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9353 		    device_xname(sc->sc_dev)));
   9354 		sc->sc_tbi_linkup = 0;
   9355 	}
   9356 
   9357 	wm_tbi_serdes_set_linkled(sc);
   9358 
   9359 	return 0;
   9360 }
   9361 
   9362 /*
   9363  * wm_tbi_mediastatus:	[ifmedia interface function]
   9364  *
   9365  *	Get the current interface media status on a 1000BASE-X device.
   9366  */
   9367 static void
   9368 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9369 {
   9370 	struct wm_softc *sc = ifp->if_softc;
   9371 	uint32_t ctrl, status;
   9372 
   9373 	ifmr->ifm_status = IFM_AVALID;
   9374 	ifmr->ifm_active = IFM_ETHER;
   9375 
   9376 	status = CSR_READ(sc, WMREG_STATUS);
   9377 	if ((status & STATUS_LU) == 0) {
   9378 		ifmr->ifm_active |= IFM_NONE;
   9379 		return;
   9380 	}
   9381 
   9382 	ifmr->ifm_status |= IFM_ACTIVE;
   9383 	/* Only 82545 is LX */
   9384 	if (sc->sc_type == WM_T_82545)
   9385 		ifmr->ifm_active |= IFM_1000_LX;
   9386 	else
   9387 		ifmr->ifm_active |= IFM_1000_SX;
   9388 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9389 		ifmr->ifm_active |= IFM_FDX;
   9390 	else
   9391 		ifmr->ifm_active |= IFM_HDX;
   9392 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9393 	if (ctrl & CTRL_RFCE)
   9394 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9395 	if (ctrl & CTRL_TFCE)
   9396 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9397 }
   9398 
   9399 /* XXX TBI only */
   9400 static int
   9401 wm_check_for_link(struct wm_softc *sc)
   9402 {
   9403 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9404 	uint32_t rxcw;
   9405 	uint32_t ctrl;
   9406 	uint32_t status;
   9407 	uint32_t sig;
   9408 
   9409 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9410 		/* XXX need some work for >= 82571 */
   9411 		if (sc->sc_type >= WM_T_82571) {
   9412 			sc->sc_tbi_linkup = 1;
   9413 			return 0;
   9414 		}
   9415 	}
   9416 
   9417 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9418 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9419 	status = CSR_READ(sc, WMREG_STATUS);
   9420 
   9421 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9422 
   9423 	DPRINTF(WM_DEBUG_LINK,
   9424 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9425 		device_xname(sc->sc_dev), __func__,
   9426 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9427 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9428 
   9429 	/*
   9430 	 * SWDPIN   LU RXCW
   9431 	 *      0    0    0
   9432 	 *      0    0    1	(should not happen)
   9433 	 *      0    1    0	(should not happen)
   9434 	 *      0    1    1	(should not happen)
   9435 	 *      1    0    0	Disable autonego and force linkup
   9436 	 *      1    0    1	got /C/ but not linkup yet
   9437 	 *      1    1    0	(linkup)
   9438 	 *      1    1    1	If IFM_AUTO, back to autonego
   9439 	 *
   9440 	 */
   9441 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9442 	    && ((status & STATUS_LU) == 0)
   9443 	    && ((rxcw & RXCW_C) == 0)) {
   9444 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9445 			__func__));
   9446 		sc->sc_tbi_linkup = 0;
   9447 		/* Disable auto-negotiation in the TXCW register */
   9448 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9449 
   9450 		/*
   9451 		 * Force link-up and also force full-duplex.
   9452 		 *
    9453 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    9454 		 * automatically, so we should update sc->sc_ctrl
   9455 		 */
   9456 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9457 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9458 	} else if (((status & STATUS_LU) != 0)
   9459 	    && ((rxcw & RXCW_C) != 0)
   9460 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9461 		sc->sc_tbi_linkup = 1;
   9462 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9463 			__func__));
   9464 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9465 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9466 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9467 	    && ((rxcw & RXCW_C) != 0)) {
   9468 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9469 	} else {
   9470 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9471 			status));
   9472 	}
   9473 
   9474 	return 0;
   9475 }
   9476 
   9477 /*
   9478  * wm_tbi_tick:
   9479  *
   9480  *	Check the link on TBI devices.
   9481  *	This function acts as mii_tick().
   9482  */
   9483 static void
   9484 wm_tbi_tick(struct wm_softc *sc)
   9485 {
   9486 	struct mii_data *mii = &sc->sc_mii;
   9487 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9488 	uint32_t status;
   9489 
   9490 	KASSERT(WM_CORE_LOCKED(sc));
   9491 
   9492 	status = CSR_READ(sc, WMREG_STATUS);
   9493 
   9494 	/* XXX is this needed? */
   9495 	(void)CSR_READ(sc, WMREG_RXCW);
   9496 	(void)CSR_READ(sc, WMREG_CTRL);
   9497 
   9498 	/* set link status */
   9499 	if ((status & STATUS_LU) == 0) {
   9500 		DPRINTF(WM_DEBUG_LINK,
   9501 		    ("%s: LINK: checklink -> down\n",
   9502 			device_xname(sc->sc_dev)));
   9503 		sc->sc_tbi_linkup = 0;
   9504 	} else if (sc->sc_tbi_linkup == 0) {
   9505 		DPRINTF(WM_DEBUG_LINK,
   9506 		    ("%s: LINK: checklink -> up %s\n",
   9507 			device_xname(sc->sc_dev),
   9508 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9509 		sc->sc_tbi_linkup = 1;
   9510 		sc->sc_tbi_serdes_ticks = 0;
   9511 	}
   9512 
   9513 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9514 		goto setled;
   9515 
   9516 	if ((status & STATUS_LU) == 0) {
   9517 		sc->sc_tbi_linkup = 0;
   9518 		/* If the timer expired, retry autonegotiation */
   9519 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9520 		    && (++sc->sc_tbi_serdes_ticks
   9521 			>= sc->sc_tbi_serdes_anegticks)) {
   9522 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9523 			sc->sc_tbi_serdes_ticks = 0;
   9524 			/*
   9525 			 * Reset the link, and let autonegotiation do
   9526 			 * its thing
   9527 			 */
   9528 			sc->sc_ctrl |= CTRL_LRST;
   9529 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9530 			CSR_WRITE_FLUSH(sc);
   9531 			delay(1000);
   9532 			sc->sc_ctrl &= ~CTRL_LRST;
   9533 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9534 			CSR_WRITE_FLUSH(sc);
   9535 			delay(1000);
   9536 			CSR_WRITE(sc, WMREG_TXCW,
   9537 			    sc->sc_txcw & ~TXCW_ANE);
   9538 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9539 		}
   9540 	}
   9541 
   9542 setled:
   9543 	wm_tbi_serdes_set_linkled(sc);
   9544 }
   9545 
   9546 /* SERDES related */
   9547 static void
   9548 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9549 {
   9550 	uint32_t reg;
   9551 
   9552 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9553 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9554 		return;
   9555 
   9556 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9557 	reg |= PCS_CFG_PCS_EN;
   9558 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9559 
   9560 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9561 	reg &= ~CTRL_EXT_SWDPIN(3);
   9562 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9563 	CSR_WRITE_FLUSH(sc);
   9564 }
   9565 
   9566 static int
   9567 wm_serdes_mediachange(struct ifnet *ifp)
   9568 {
   9569 	struct wm_softc *sc = ifp->if_softc;
   9570 	bool pcs_autoneg = true; /* XXX */
   9571 	uint32_t ctrl_ext, pcs_lctl, reg;
   9572 
   9573 	/* XXX Currently, this function is not called on 8257[12] */
   9574 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9575 	    || (sc->sc_type >= WM_T_82575))
   9576 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9577 
   9578 	wm_serdes_power_up_link_82575(sc);
   9579 
   9580 	sc->sc_ctrl |= CTRL_SLU;
   9581 
   9582 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9583 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9584 
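         	/*
         	 * Derive the PCS configuration from the link mode selected
         	 * in CTRL_EXT: SGMII autonegotiates through the PCS, 1000KX
         	 * forces speed and duplex, and for the other modes the
         	 * 8257[56] may disable PCS autonego via WM_F_PCS_DIS_AUTONEGO.
         	 */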
   9585 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9586 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9587 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9588 	case CTRL_EXT_LINK_MODE_SGMII:
   9589 		pcs_autoneg = true;
   9590 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9591 		break;
   9592 	case CTRL_EXT_LINK_MODE_1000KX:
   9593 		pcs_autoneg = false;
   9594 		/* FALLTHROUGH */
   9595 	default:
   9596 		if ((sc->sc_type == WM_T_82575)
   9597 		    || (sc->sc_type == WM_T_82576)) {
   9598 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9599 				pcs_autoneg = false;
   9600 		}
   9601 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9602 		    | CTRL_FRCFDX;
   9603 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9604 	}
   9605 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9606 
   9607 	if (pcs_autoneg) {
   9608 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9609 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9610 
   9611 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9612 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9613 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9614 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9615 	} else
   9616 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9617 
   9618 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    9619 
   9621 	return 0;
   9622 }
   9623 
   9624 static void
   9625 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9626 {
   9627 	struct wm_softc *sc = ifp->if_softc;
   9628 	struct mii_data *mii = &sc->sc_mii;
   9629 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9630 	uint32_t pcs_adv, pcs_lpab, reg;
   9631 
   9632 	ifmr->ifm_status = IFM_AVALID;
   9633 	ifmr->ifm_active = IFM_ETHER;
   9634 
   9635 	/* Check PCS */
   9636 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9637 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9638 		ifmr->ifm_active |= IFM_NONE;
   9639 		sc->sc_tbi_linkup = 0;
   9640 		goto setled;
   9641 	}
   9642 
   9643 	sc->sc_tbi_linkup = 1;
   9644 	ifmr->ifm_status |= IFM_ACTIVE;
   9645 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9646 	if ((reg & PCS_LSTS_FDX) != 0)
   9647 		ifmr->ifm_active |= IFM_FDX;
   9648 	else
   9649 		ifmr->ifm_active |= IFM_HDX;
   9650 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9651 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9652 		/* Check flow */
   9653 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9654 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9655 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9656 			goto setled;
   9657 		}
   9658 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9659 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9660 		DPRINTF(WM_DEBUG_LINK,
   9661 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9662 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9663 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9664 			mii->mii_media_active |= IFM_FLOW
   9665 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9666 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9667 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9668 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9669 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9670 			mii->mii_media_active |= IFM_FLOW
   9671 			    | IFM_ETH_TXPAUSE;
   9672 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9673 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9674 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9675 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9676 			mii->mii_media_active |= IFM_FLOW
   9677 			    | IFM_ETH_RXPAUSE;
   9678 		} else {
   9679 		}
   9680 	}
   9681 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9682 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9683 setled:
   9684 	wm_tbi_serdes_set_linkled(sc);
   9685 }
   9686 
   9687 /*
   9688  * wm_serdes_tick:
   9689  *
   9690  *	Check the link on serdes devices.
   9691  */
   9692 static void
   9693 wm_serdes_tick(struct wm_softc *sc)
   9694 {
   9695 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9696 	struct mii_data *mii = &sc->sc_mii;
   9697 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9698 	uint32_t reg;
   9699 
   9700 	KASSERT(WM_CORE_LOCKED(sc));
   9701 
   9702 	mii->mii_media_status = IFM_AVALID;
   9703 	mii->mii_media_active = IFM_ETHER;
   9704 
   9705 	/* Check PCS */
   9706 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9707 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9708 		mii->mii_media_status |= IFM_ACTIVE;
   9709 		sc->sc_tbi_linkup = 1;
   9710 		sc->sc_tbi_serdes_ticks = 0;
   9711 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9712 		if ((reg & PCS_LSTS_FDX) != 0)
   9713 			mii->mii_media_active |= IFM_FDX;
   9714 		else
   9715 			mii->mii_media_active |= IFM_HDX;
   9716 	} else {
    9717 		mii->mii_media_active |= IFM_NONE;
   9718 		sc->sc_tbi_linkup = 0;
		/* If the timer expired, retry autonegotiation */
   9720 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9721 		    && (++sc->sc_tbi_serdes_ticks
   9722 			>= sc->sc_tbi_serdes_anegticks)) {
   9723 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9724 			sc->sc_tbi_serdes_ticks = 0;
   9725 			/* XXX */
   9726 			wm_serdes_mediachange(ifp);
   9727 		}
   9728 	}
   9729 
   9730 	wm_tbi_serdes_set_linkled(sc);
   9731 }
   9732 
   9733 /* SFP related */
   9734 
   9735 static int
   9736 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9737 {
   9738 	uint32_t i2ccmd;
   9739 	int i;
   9740 
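	/* Start a one-byte read at `offset' over the SFP module's I2C bus */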
   9741 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9742 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9743 
   9744 	/* Poll the ready bit */
   9745 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9746 		delay(50);
   9747 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9748 		if (i2ccmd & I2CCMD_READY)
   9749 			break;
   9750 	}
   9751 	if ((i2ccmd & I2CCMD_READY) == 0)
   9752 		return -1;
   9753 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9754 		return -1;
   9755 
   9756 	*data = i2ccmd & 0x00ff;
   9757 
   9758 	return 0;
   9759 }
   9760 
   9761 static uint32_t
   9762 wm_sfp_get_media_type(struct wm_softc *sc)
   9763 {
   9764 	uint32_t ctrl_ext;
   9765 	uint8_t val = 0;
   9766 	int timeout = 3;
   9767 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9768 	int rv = -1;
   9769 
   9770 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9771 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9772 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9773 	CSR_WRITE_FLUSH(sc);
   9774 
   9775 	/* Read SFP module data */
   9776 	while (timeout) {
   9777 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9778 		if (rv == 0)
   9779 			break;
   9780 		delay(100*1000); /* XXX too big */
   9781 		timeout--;
   9782 	}
   9783 	if (rv != 0)
   9784 		goto out;
   9785 	switch (val) {
   9786 	case SFF_SFP_ID_SFF:
   9787 		aprint_normal_dev(sc->sc_dev,
   9788 		    "Module/Connector soldered to board\n");
   9789 		break;
   9790 	case SFF_SFP_ID_SFP:
   9791 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9792 		break;
   9793 	case SFF_SFP_ID_UNKNOWN:
   9794 		goto out;
   9795 	default:
   9796 		break;
   9797 	}
   9798 
   9799 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9800 	if (rv != 0) {
   9801 		goto out;
   9802 	}
   9803 
   9804 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9805 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   9807 		sc->sc_flags |= WM_F_SGMII;
   9808 		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   9810 		sc->sc_flags |= WM_F_SGMII;
   9811 		mediatype = WM_MEDIATYPE_SERDES;
   9812 	}
   9813 
   9814 out:
   9815 	/* Restore I2C interface setting */
   9816 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9817 
   9818 	return mediatype;
   9819 }

/*
   9821  * NVM related.
   9822  * Microwire, SPI (w/wo EERD) and Flash.
   9823  */
   9824 
   9825 /* Both spi and uwire */
   9826 
   9827 /*
   9828  * wm_eeprom_sendbits:
   9829  *
   9830  *	Send a series of bits to the EEPROM.
   9831  */
   9832 static void
   9833 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9834 {
   9835 	uint32_t reg;
   9836 	int x;
   9837 
   9838 	reg = CSR_READ(sc, WMREG_EECD);
   9839 
   9840 	for (x = nbits; x > 0; x--) {
   9841 		if (bits & (1U << (x - 1)))
   9842 			reg |= EECD_DI;
   9843 		else
   9844 			reg &= ~EECD_DI;
   9845 		CSR_WRITE(sc, WMREG_EECD, reg);
   9846 		CSR_WRITE_FLUSH(sc);
   9847 		delay(2);
   9848 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9849 		CSR_WRITE_FLUSH(sc);
   9850 		delay(2);
   9851 		CSR_WRITE(sc, WMREG_EECD, reg);
   9852 		CSR_WRITE_FLUSH(sc);
   9853 		delay(2);
   9854 	}
   9855 }
   9856 
   9857 /*
   9858  * wm_eeprom_recvbits:
   9859  *
   9860  *	Receive a series of bits from the EEPROM.
   9861  */
   9862 static void
   9863 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9864 {
   9865 	uint32_t reg, val;
   9866 	int x;
   9867 
   9868 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9869 
   9870 	val = 0;
   9871 	for (x = nbits; x > 0; x--) {
   9872 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9873 		CSR_WRITE_FLUSH(sc);
   9874 		delay(2);
   9875 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9876 			val |= (1U << (x - 1));
   9877 		CSR_WRITE(sc, WMREG_EECD, reg);
   9878 		CSR_WRITE_FLUSH(sc);
   9879 		delay(2);
   9880 	}
   9881 	*valp = val;
   9882 }
   9883 
   9884 /* Microwire */
   9885 
   9886 /*
   9887  * wm_nvm_read_uwire:
   9888  *
   9889  *	Read a word from the EEPROM using the MicroWire protocol.
   9890  */
   9891 static int
   9892 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9893 {
   9894 	uint32_t reg, val;
   9895 	int i;
   9896 
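	/*
	 * One Microwire transaction per word: raise CS, clock out the
	 * 3-bit READ opcode followed by the word address, then clock in
	 * 16 data bits and drop CS again.
	 */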
   9897 	for (i = 0; i < wordcnt; i++) {
   9898 		/* Clear SK and DI. */
   9899 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9900 		CSR_WRITE(sc, WMREG_EECD, reg);
   9901 
   9902 		/*
   9903 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9904 		 * and Xen.
   9905 		 *
   9906 		 * We use this workaround only for 82540 because qemu's
   9907 		 * e1000 act as 82540.
   9908 		 */
   9909 		if (sc->sc_type == WM_T_82540) {
   9910 			reg |= EECD_SK;
   9911 			CSR_WRITE(sc, WMREG_EECD, reg);
   9912 			reg &= ~EECD_SK;
   9913 			CSR_WRITE(sc, WMREG_EECD, reg);
   9914 			CSR_WRITE_FLUSH(sc);
   9915 			delay(2);
   9916 		}
   9917 		/* XXX: end of workaround */
   9918 
   9919 		/* Set CHIP SELECT. */
   9920 		reg |= EECD_CS;
   9921 		CSR_WRITE(sc, WMREG_EECD, reg);
   9922 		CSR_WRITE_FLUSH(sc);
   9923 		delay(2);
   9924 
   9925 		/* Shift in the READ command. */
   9926 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9927 
   9928 		/* Shift in address. */
   9929 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9930 
   9931 		/* Shift out the data. */
   9932 		wm_eeprom_recvbits(sc, &val, 16);
   9933 		data[i] = val & 0xffff;
   9934 
   9935 		/* Clear CHIP SELECT. */
   9936 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9937 		CSR_WRITE(sc, WMREG_EECD, reg);
   9938 		CSR_WRITE_FLUSH(sc);
   9939 		delay(2);
   9940 	}
   9941 
   9942 	return 0;
   9943 }
   9944 
   9945 /* SPI */
   9946 
   9947 /*
   9948  * Set SPI and FLASH related information from the EECD register.
   9949  * For 82541 and 82547, the word size is taken from EEPROM.
   9950  */
   9951 static int
   9952 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9953 {
   9954 	int size;
   9955 	uint32_t reg;
   9956 	uint16_t data;
   9957 
   9958 	reg = CSR_READ(sc, WMREG_EECD);
   9959 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9960 
   9961 	/* Read the size of NVM from EECD by default */
   9962 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9963 	switch (sc->sc_type) {
   9964 	case WM_T_82541:
   9965 	case WM_T_82541_2:
   9966 	case WM_T_82547:
   9967 	case WM_T_82547_2:
   9968 		/* Set dummy value to access EEPROM */
   9969 		sc->sc_nvm_wordsize = 64;
   9970 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9971 		reg = data;
   9972 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9973 		if (size == 0)
   9974 			size = 6; /* 64 word size */
   9975 		else
   9976 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9977 		break;
   9978 	case WM_T_80003:
   9979 	case WM_T_82571:
   9980 	case WM_T_82572:
   9981 	case WM_T_82573: /* SPI case */
   9982 	case WM_T_82574: /* SPI case */
   9983 	case WM_T_82583: /* SPI case */
   9984 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9985 		if (size > 14)
   9986 			size = 14;
   9987 		break;
   9988 	case WM_T_82575:
   9989 	case WM_T_82576:
   9990 	case WM_T_82580:
   9991 	case WM_T_I350:
   9992 	case WM_T_I354:
   9993 	case WM_T_I210:
   9994 	case WM_T_I211:
   9995 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9996 		if (size > 15)
   9997 			size = 15;
   9998 		break;
   9999 	default:
   10000 		aprint_error_dev(sc->sc_dev,
   10001 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
   10004 	}
   10005 
   10006 	sc->sc_nvm_wordsize = 1 << size;
   10007 
   10008 	return 0;
   10009 }
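
/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in the
 * shared e1000 code): an EECD size field of 2 gives size = 2 + 6 = 8,
 * so sc_nvm_wordsize = 1 << 8 = 256 words.
 */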
   10010 
   10011 /*
   10012  * wm_nvm_ready_spi:
   10013  *
   10014  *	Wait for a SPI EEPROM to be ready for commands.
   10015  */
   10016 static int
   10017 wm_nvm_ready_spi(struct wm_softc *sc)
   10018 {
   10019 	uint32_t val;
   10020 	int usec;
   10021 
   10022 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10023 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10024 		wm_eeprom_recvbits(sc, &val, 8);
   10025 		if ((val & SPI_SR_RDY) == 0)
   10026 			break;
   10027 	}
   10028 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   10030 		return 1;
   10031 	}
   10032 	return 0;
   10033 }
   10034 
   10035 /*
   10036  * wm_nvm_read_spi:
   10037  *
 *	Read a word from the EEPROM using the SPI protocol.
   10039  */
   10040 static int
   10041 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10042 {
   10043 	uint32_t reg, val;
   10044 	int i;
   10045 	uint8_t opc;
   10046 
   10047 	/* Clear SK and CS. */
   10048 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10049 	CSR_WRITE(sc, WMREG_EECD, reg);
   10050 	CSR_WRITE_FLUSH(sc);
   10051 	delay(2);
   10052 
   10053 	if (wm_nvm_ready_spi(sc))
   10054 		return 1;
   10055 
   10056 	/* Toggle CS to flush commands. */
   10057 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10058 	CSR_WRITE_FLUSH(sc);
   10059 	delay(2);
   10060 	CSR_WRITE(sc, WMREG_EECD, reg);
   10061 	CSR_WRITE_FLUSH(sc);
   10062 	delay(2);
   10063 
   10064 	opc = SPI_OPC_READ;
   10065 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10066 		opc |= SPI_OPC_A8;
   10067 
   10068 	wm_eeprom_sendbits(sc, opc, 8);
   10069 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10070 
   10071 	for (i = 0; i < wordcnt; i++) {
   10072 		wm_eeprom_recvbits(sc, &val, 16);
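		/* The high byte is shifted out first; swap into word order */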
   10073 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10074 	}
   10075 
   10076 	/* Raise CS and clear SK. */
   10077 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10078 	CSR_WRITE(sc, WMREG_EECD, reg);
   10079 	CSR_WRITE_FLUSH(sc);
   10080 	delay(2);
   10081 
   10082 	return 0;
   10083 }
   10084 
/* Reading using EERD */
   10086 
   10087 static int
   10088 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10089 {
   10090 	uint32_t attempts = 100000;
   10091 	uint32_t i, reg = 0;
   10092 	int32_t done = -1;
   10093 
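	/* Poll the DONE bit for up to attempts * 5us (about half a second) */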
   10094 	for (i = 0; i < attempts; i++) {
   10095 		reg = CSR_READ(sc, rw);
   10096 
   10097 		if (reg & EERD_DONE) {
   10098 			done = 0;
   10099 			break;
   10100 		}
   10101 		delay(5);
   10102 	}
   10103 
   10104 	return done;
   10105 }
   10106 
   10107 static int
   10108 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10109     uint16_t *data)
   10110 {
   10111 	int i, eerd = 0;
   10112 	int error = 0;
   10113 
   10114 	for (i = 0; i < wordcnt; i++) {
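		/* Compose the word address and the START bit to begin a read */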
   10115 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10116 
   10117 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10118 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10119 		if (error != 0)
   10120 			break;
   10121 
   10122 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10123 	}
   10124 
   10125 	return error;
   10126 }
   10127 
   10128 /* Flash */
   10129 
   10130 static int
   10131 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10132 {
   10133 	uint32_t eecd;
   10134 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10135 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10136 	uint8_t sig_byte = 0;
   10137 
   10138 	switch (sc->sc_type) {
   10139 	case WM_T_PCH_SPT:
   10140 		/*
   10141 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10142 		 * sector valid bits from the NVM.
   10143 		 */
   10144 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10145 		if ((*bank == 0) || (*bank == 1)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: no valid NVM bank present\n", __func__);
   10149 			return -1;
   10150 		} else {
   10151 			*bank = *bank - 2;
   10152 			return 0;
   10153 		}
   10154 	case WM_T_ICH8:
   10155 	case WM_T_ICH9:
   10156 		eecd = CSR_READ(sc, WMREG_EECD);
   10157 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10158 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10159 			return 0;
   10160 		}
   10161 		/* FALLTHROUGH */
   10162 	default:
   10163 		/* Default to 0 */
   10164 		*bank = 0;
   10165 
   10166 		/* Check bank 0 */
   10167 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10168 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10169 			*bank = 0;
   10170 			return 0;
   10171 		}
   10172 
   10173 		/* Check bank 1 */
   10174 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10175 		    &sig_byte);
   10176 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10177 			*bank = 1;
   10178 			return 0;
   10179 		}
   10180 	}
   10181 
   10182 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10183 		device_xname(sc->sc_dev)));
   10184 	return -1;
   10185 }
   10186 
   10187 /******************************************************************************
   10188  * This function does initial flash setup so that a new read/write/erase cycle
   10189  * can be started.
   10190  *
   10191  * sc - The pointer to the hw structure
   10192  ****************************************************************************/
   10193 static int32_t
   10194 wm_ich8_cycle_init(struct wm_softc *sc)
   10195 {
   10196 	uint16_t hsfsts;
   10197 	int32_t error = 1;
   10198 	int32_t i     = 0;
   10199 
   10200 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10201 
	/* Check that the Flash Descriptor Valid bit is set in Hw status */
   10203 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10204 		return error;
   10205 	}
   10206 
	/* Clear FCERR and DAEL in Hw status by writing 1s */
   10209 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10210 
   10211 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10212 
   10213 	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after hardware
	 * reset, which can then be used as an indication whether a cycle
	 * is in progress or has been completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the cycle-in-
	 * progress bit so that two threads' accesses to those bits are
	 * serialized, or some way to keep the two threads from starting
	 * the cycle at the same time.
   10222 	 */
   10223 
   10224 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10225 		/*
   10226 		 * There is no cycle running at present, so we can start a
   10227 		 * cycle
   10228 		 */
   10229 
   10230 		/* Begin by setting Flash Cycle Done. */
   10231 		hsfsts |= HSFSTS_DONE;
   10232 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10233 		error = 0;
   10234 	} else {
   10235 		/*
		 * Otherwise poll for some time so the current cycle has a
   10237 		 * chance to end before giving up.
   10238 		 */
   10239 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10240 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10241 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10242 				error = 0;
   10243 				break;
   10244 			}
   10245 			delay(1);
   10246 		}
   10247 		if (error == 0) {
   10248 			/*
			 * The previous cycle completed before the timeout;
   10250 			 * now set the Flash Cycle Done.
   10251 			 */
   10252 			hsfsts |= HSFSTS_DONE;
   10253 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10254 		}
   10255 	}
   10256 	return error;
   10257 }
   10258 
   10259 /******************************************************************************
   10260  * This function starts a flash cycle and waits for its completion
   10261  *
   10262  * sc - The pointer to the hw structure
   10263  ****************************************************************************/
   10264 static int32_t
   10265 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10266 {
   10267 	uint16_t hsflctl;
   10268 	uint16_t hsfsts;
   10269 	int32_t error = 1;
   10270 	uint32_t i = 0;
   10271 
   10272 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10273 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10274 	hsflctl |= HSFCTL_GO;
   10275 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10276 
   10277 	/* Wait till FDONE bit is set to 1 */
   10278 	do {
   10279 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10280 		if (hsfsts & HSFSTS_DONE)
   10281 			break;
   10282 		delay(1);
   10283 		i++;
   10284 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10286 		error = 0;
   10287 
   10288 	return error;
   10289 }
   10290 
   10291 /******************************************************************************
   10292  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10293  *
   10294  * sc - The pointer to the hw structure
   10295  * index - The index of the byte or word to read.
   10296  * size - Size of data to read, 1=byte 2=word, 4=dword
   10297  * data - Pointer to the word to store the value read.
   10298  *****************************************************************************/
   10299 static int32_t
   10300 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10301     uint32_t size, uint32_t *data)
   10302 {
   10303 	uint16_t hsfsts;
   10304 	uint16_t hsflctl;
   10305 	uint32_t flash_linear_address;
   10306 	uint32_t flash_data = 0;
   10307 	int32_t error = 1;
   10308 	int32_t count = 0;
   10309 
	if (size < 1 || size > 4 || data == NULL ||
   10311 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10312 		return error;
   10313 
   10314 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10315 	    sc->sc_ich8_flash_base;
   10316 
   10317 	do {
   10318 		delay(1);
   10319 		/* Steps */
   10320 		error = wm_ich8_cycle_init(sc);
   10321 		if (error)
   10322 			break;
   10323 
   10324 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field is size - 1 (0 = 1 byte ... 3 = 4 bytes) */
   10326 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10327 		    & HSFCTL_BCOUNT_MASK;
   10328 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10329 		if (sc->sc_type == WM_T_PCH_SPT) {
   10330 			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32-bit access is supported.
   10333 			 */
   10334 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10335 			    (uint32_t)hsflctl);
   10336 		} else
   10337 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10338 
   10339 		/*
   10340 		 * Write the last 24 bits of index into Flash Linear address
   10341 		 * field in Flash Address
   10342 		 */
   10343 		/* TODO: TBD maybe check the index against the size of flash */
   10344 
   10345 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10346 
   10347 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10348 
   10349 		/*
		 * Check if FCERR is set to 1.  If it is, clear it and try
		 * the whole sequence a few more times; otherwise read in
		 * (shift in) the Flash Data0, least significant byte first.
   10354 		 */
   10355 		if (error == 0) {
   10356 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10357 			if (size == 1)
   10358 				*data = (uint8_t)(flash_data & 0x000000FF);
   10359 			else if (size == 2)
   10360 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10361 			else if (size == 4)
   10362 				*data = (uint32_t)flash_data;
   10363 			break;
   10364 		} else {
   10365 			/*
   10366 			 * If we've gotten here, then things are probably
   10367 			 * completely hosed, but if the error condition is
   10368 			 * detected, it won't hurt to give it another try...
   10369 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10370 			 */
   10371 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10372 			if (hsfsts & HSFSTS_ERR) {
   10373 				/* Repeat for some time before giving up. */
   10374 				continue;
   10375 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10376 				break;
   10377 		}
   10378 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10379 
   10380 	return error;
   10381 }
   10382 
   10383 /******************************************************************************
   10384  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10385  *
   10386  * sc - pointer to wm_hw structure
   10387  * index - The index of the byte to read.
   10388  * data - Pointer to a byte to store the value read.
   10389  *****************************************************************************/
   10390 static int32_t
   10391 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10392 {
   10393 	int32_t status;
   10394 	uint32_t word = 0;
   10395 
   10396 	status = wm_read_ich8_data(sc, index, 1, &word);
   10397 	if (status == 0)
   10398 		*data = (uint8_t)word;
   10399 	else
   10400 		*data = 0;
   10401 
   10402 	return status;
   10403 }
   10404 
   10405 /******************************************************************************
   10406  * Reads a word from the NVM using the ICH8 flash access registers.
   10407  *
   10408  * sc - pointer to wm_hw structure
   10409  * index - The starting byte index of the word to read.
   10410  * data - Pointer to a word to store the value read.
   10411  *****************************************************************************/
   10412 static int32_t
   10413 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10414 {
   10415 	int32_t status;
   10416 	uint32_t word = 0;
   10417 
   10418 	status = wm_read_ich8_data(sc, index, 2, &word);
   10419 	if (status == 0)
   10420 		*data = (uint16_t)word;
   10421 	else
   10422 		*data = 0;
   10423 
   10424 	return status;
   10425 }
   10426 
   10427 /******************************************************************************
   10428  * Reads a dword from the NVM using the ICH8 flash access registers.
   10429  *
   10430  * sc - pointer to wm_hw structure
   10431  * index - The starting byte index of the word to read.
   10432  * data - Pointer to a word to store the value read.
   10433  *****************************************************************************/
   10434 static int32_t
   10435 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10436 {
   10437 	int32_t status;
   10438 
   10439 	status = wm_read_ich8_data(sc, index, 4, data);
   10440 	return status;
   10441 }
   10442 
   10443 /******************************************************************************
   10444  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10445  * register.
   10446  *
   10447  * sc - Struct containing variables accessed by shared code
   10448  * offset - offset of word in the EEPROM to read
   10449  * data - word read from the EEPROM
   10450  * words - number of words to read
   10451  *****************************************************************************/
   10452 static int
   10453 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10454 {
   10455 	int32_t  error = 0;
   10456 	uint32_t flash_bank = 0;
   10457 	uint32_t act_offset = 0;
   10458 	uint32_t bank_offset = 0;
   10459 	uint16_t word = 0;
   10460 	uint16_t i = 0;
   10461 
   10462 	/*
   10463 	 * We need to know which is the valid flash bank.  In the event
   10464 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10465 	 * managing flash_bank.  So it cannot be trusted and needs
   10466 	 * to be updated with each read.
   10467 	 */
   10468 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10469 	if (error) {
   10470 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10471 			device_xname(sc->sc_dev)));
   10472 		flash_bank = 0;
   10473 	}
   10474 
   10475 	/*
   10476 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10477 	 * size
   10478 	 */
   10479 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10480 
   10481 	error = wm_get_swfwhw_semaphore(sc);
   10482 	if (error) {
   10483 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10484 		    __func__);
   10485 		return error;
   10486 	}
   10487 
   10488 	for (i = 0; i < words; i++) {
   10489 		/* The NVM part needs a byte offset, hence * 2 */
   10490 		act_offset = bank_offset + ((offset + i) * 2);
   10491 		error = wm_read_ich8_word(sc, act_offset, &word);
   10492 		if (error) {
   10493 			aprint_error_dev(sc->sc_dev,
   10494 			    "%s: failed to read NVM\n", __func__);
   10495 			break;
   10496 		}
   10497 		data[i] = word;
   10498 	}
   10499 
   10500 	wm_put_swfwhw_semaphore(sc);
   10501 	return error;
   10502 }
   10503 
   10504 /******************************************************************************
   10505  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10506  * register.
   10507  *
   10508  * sc - Struct containing variables accessed by shared code
   10509  * offset - offset of word in the EEPROM to read
   10510  * data - word read from the EEPROM
   10511  * words - number of words to read
   10512  *****************************************************************************/
   10513 static int
   10514 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10515 {
   10516 	int32_t  error = 0;
   10517 	uint32_t flash_bank = 0;
   10518 	uint32_t act_offset = 0;
   10519 	uint32_t bank_offset = 0;
   10520 	uint32_t dword = 0;
   10521 	uint16_t i = 0;
   10522 
   10523 	/*
   10524 	 * We need to know which is the valid flash bank.  In the event
   10525 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10526 	 * managing flash_bank.  So it cannot be trusted and needs
   10527 	 * to be updated with each read.
   10528 	 */
   10529 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10530 	if (error) {
   10531 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10532 			device_xname(sc->sc_dev)));
   10533 		flash_bank = 0;
   10534 	}
   10535 
   10536 	/*
   10537 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10538 	 * size
   10539 	 */
   10540 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10541 
   10542 	error = wm_get_swfwhw_semaphore(sc);
   10543 	if (error) {
   10544 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10545 		    __func__);
   10546 		return error;
   10547 	}
   10548 
   10549 	for (i = 0; i < words; i++) {
   10550 		/* The NVM part needs a byte offset, hence * 2 */
   10551 		act_offset = bank_offset + ((offset + i) * 2);
   10552 		/* but we must read dword aligned, so mask ... */
   10553 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10554 		if (error) {
   10555 			aprint_error_dev(sc->sc_dev,
   10556 			    "%s: failed to read NVM\n", __func__);
   10557 			break;
   10558 		}
   10559 		/* ... and pick out low or high word */
   10560 		if ((act_offset & 0x2) == 0)
   10561 			data[i] = (uint16_t)(dword & 0xFFFF);
   10562 		else
   10563 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10564 	}
   10565 
   10566 	wm_put_swfwhw_semaphore(sc);
   10567 	return error;
   10568 }
   10569 
   10570 /* iNVM */
   10571 
   10572 static int
   10573 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10574 {
	int32_t rv = -1; /* Fail unless the word is found below */
   10576 	uint32_t invm_dword;
   10577 	uint16_t i;
   10578 	uint8_t record_type, word_address;
   10579 
   10580 	for (i = 0; i < INVM_SIZE; i++) {
   10581 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10582 		/* Get record type */
   10583 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10584 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10585 			break;
   10586 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10587 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10588 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10589 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10590 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10591 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10592 			if (word_address == address) {
   10593 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10594 				rv = 0;
   10595 				break;
   10596 			}
   10597 		}
   10598 	}
   10599 
   10600 	return rv;
   10601 }
   10602 
   10603 static int
   10604 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10605 {
   10606 	int rv = 0;
   10607 	int i;
   10608 
   10609 	for (i = 0; i < words; i++) {
   10610 		switch (offset + i) {
   10611 		case NVM_OFF_MACADDR:
   10612 		case NVM_OFF_MACADDR1:
   10613 		case NVM_OFF_MACADDR2:
   10614 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10615 			if (rv != 0) {
   10616 				data[i] = 0xffff;
   10617 				rv = -1;
   10618 			}
   10619 			break;
   10620 		case NVM_OFF_CFG2:
   10621 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10622 			if (rv != 0) {
   10623 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10624 				rv = 0;
   10625 			}
   10626 			break;
   10627 		case NVM_OFF_CFG4:
   10628 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10629 			if (rv != 0) {
   10630 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10631 				rv = 0;
   10632 			}
   10633 			break;
   10634 		case NVM_OFF_LED_1_CFG:
   10635 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10636 			if (rv != 0) {
   10637 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10638 				rv = 0;
   10639 			}
   10640 			break;
   10641 		case NVM_OFF_LED_0_2_CFG:
   10642 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10643 			if (rv != 0) {
   10644 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10645 				rv = 0;
   10646 			}
   10647 			break;
   10648 		case NVM_OFF_ID_LED_SETTINGS:
   10649 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10650 			if (rv != 0) {
   10651 				*data = ID_LED_RESERVED_FFFF;
   10652 				rv = 0;
   10653 			}
   10654 			break;
   10655 		default:
   10656 			DPRINTF(WM_DEBUG_NVM,
   10657 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10658 			*data = NVM_RESERVED_WORD;
   10659 			break;
   10660 		}
   10661 	}
   10662 
   10663 	return rv;
   10664 }
   10665 
   10666 /* Lock, detecting NVM type, validate checksum, version and read */
   10667 
   10668 /*
   10669  * wm_nvm_acquire:
   10670  *
   10671  *	Perform the EEPROM handshake required on some chips.
   10672  */
   10673 static int
   10674 wm_nvm_acquire(struct wm_softc *sc)
   10675 {
   10676 	uint32_t reg;
   10677 	int x;
   10678 	int ret = 0;
   10679 
	/* Always succeeds for flash-type NVM; no handshake is needed */
   10681 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10682 		return 0;
   10683 
   10684 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10685 		ret = wm_get_swfwhw_semaphore(sc);
   10686 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10687 		/* This will also do wm_get_swsm_semaphore() if needed */
   10688 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10689 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10690 		ret = wm_get_swsm_semaphore(sc);
   10691 	}
   10692 
   10693 	if (ret) {
   10694 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10695 			__func__);
   10696 		return 1;
   10697 	}
   10698 
   10699 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10700 		reg = CSR_READ(sc, WMREG_EECD);
   10701 
   10702 		/* Request EEPROM access. */
   10703 		reg |= EECD_EE_REQ;
   10704 		CSR_WRITE(sc, WMREG_EECD, reg);
   10705 
   10706 		/* ..and wait for it to be granted. */
   10707 		for (x = 0; x < 1000; x++) {
   10708 			reg = CSR_READ(sc, WMREG_EECD);
   10709 			if (reg & EECD_EE_GNT)
   10710 				break;
   10711 			delay(5);
   10712 		}
   10713 		if ((reg & EECD_EE_GNT) == 0) {
   10714 			aprint_error_dev(sc->sc_dev,
   10715 			    "could not acquire EEPROM GNT\n");
   10716 			reg &= ~EECD_EE_REQ;
   10717 			CSR_WRITE(sc, WMREG_EECD, reg);
   10718 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10719 				wm_put_swfwhw_semaphore(sc);
   10720 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10721 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10722 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10723 				wm_put_swsm_semaphore(sc);
   10724 			return 1;
   10725 		}
   10726 	}
   10727 
   10728 	return 0;
   10729 }
   10730 
   10731 /*
   10732  * wm_nvm_release:
   10733  *
   10734  *	Release the EEPROM mutex.
   10735  */
   10736 static void
   10737 wm_nvm_release(struct wm_softc *sc)
   10738 {
   10739 	uint32_t reg;
   10740 
	/* Always succeeds for flash-type NVM; no handshake is needed */
   10742 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10743 		return;
   10744 
   10745 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10746 		reg = CSR_READ(sc, WMREG_EECD);
   10747 		reg &= ~EECD_EE_REQ;
   10748 		CSR_WRITE(sc, WMREG_EECD, reg);
   10749 	}
   10750 
   10751 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10752 		wm_put_swfwhw_semaphore(sc);
   10753 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10754 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10755 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10756 		wm_put_swsm_semaphore(sc);
   10757 }
   10758 
   10759 static int
   10760 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10761 {
   10762 	uint32_t eecd = 0;
   10763 
   10764 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10765 	    || sc->sc_type == WM_T_82583) {
   10766 		eecd = CSR_READ(sc, WMREG_EECD);
   10767 
   10768 		/* Isolate bits 15 & 16 */
   10769 		eecd = ((eecd >> 15) & 0x03);
   10770 
   10771 		/* If both bits are set, device is Flash type */
   10772 		if (eecd == 0x03)
   10773 			return 0;
   10774 	}
   10775 	return 1;
   10776 }
   10777 
   10778 static int
   10779 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10780 {
   10781 	uint32_t eec;
   10782 
   10783 	eec = CSR_READ(sc, WMREG_EEC);
   10784 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10785 		return 1;
   10786 
   10787 	return 0;
   10788 }
   10789 
   10790 /*
   10791  * wm_nvm_validate_checksum
   10792  *
 * The checksum is defined as the sum of the first 64 (16 bit) words,
 * which must equal NVM_CHECKSUM.
   10794  */
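
/*
 * For example (assuming NVM_CHECKSUM is 0xBABA, as in the shared e1000
 * code): if words 0x00-0x3e sum to 0x1234, the checksum word at 0x3f
 * must be 0xBABA - 0x1234 = 0xA886.
 */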
   10795 static int
   10796 wm_nvm_validate_checksum(struct wm_softc *sc)
   10797 {
   10798 	uint16_t checksum;
   10799 	uint16_t eeprom_data;
   10800 #ifdef WM_DEBUG
   10801 	uint16_t csum_wordaddr, valid_checksum;
   10802 #endif
   10803 	int i;
   10804 
   10805 	checksum = 0;
   10806 
   10807 	/* Don't check for I211 */
   10808 	if (sc->sc_type == WM_T_I211)
   10809 		return 0;
   10810 
   10811 #ifdef WM_DEBUG
   10812 	if (sc->sc_type == WM_T_PCH_LPT) {
   10813 		csum_wordaddr = NVM_OFF_COMPAT;
   10814 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10815 	} else {
   10816 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10817 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10818 	}
   10819 
   10820 	/* Dump EEPROM image for debug */
   10821 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10822 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10823 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10824 		/* XXX PCH_SPT? */
   10825 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10826 		if ((eeprom_data & valid_checksum) == 0) {
   10827 			DPRINTF(WM_DEBUG_NVM,
   10828 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10829 				device_xname(sc->sc_dev), eeprom_data,
   10830 				    valid_checksum));
   10831 		}
   10832 	}
   10833 
   10834 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10835 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10836 		for (i = 0; i < NVM_SIZE; i++) {
   10837 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10838 				printf("XXXX ");
   10839 			else
   10840 				printf("%04hx ", eeprom_data);
   10841 			if (i % 8 == 7)
   10842 				printf("\n");
   10843 		}
   10844 	}
   10845 
   10846 #endif /* WM_DEBUG */
   10847 
   10848 	for (i = 0; i < NVM_SIZE; i++) {
   10849 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10850 			return 1;
   10851 		checksum += eeprom_data;
   10852 	}
   10853 
   10854 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10855 #ifdef WM_DEBUG
   10856 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10857 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10858 #endif
   10859 	}
   10860 
   10861 	return 0;
   10862 }
   10863 
   10864 static void
   10865 wm_nvm_version_invm(struct wm_softc *sc)
   10866 {
   10867 	uint32_t dword;
   10868 
   10869 	/*
   10870 	 * Linux's code to decode version is very strange, so we don't
	 * obey that algorithm and just use word 61 as the document says.
   10872 	 * Perhaps it's not perfect though...
   10873 	 *
   10874 	 * Example:
   10875 	 *
   10876 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10877 	 */
   10878 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10879 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10880 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10881 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10882 }
   10883 
   10884 static void
   10885 wm_nvm_version(struct wm_softc *sc)
   10886 {
   10887 	uint16_t major, minor, build, patch;
   10888 	uint16_t uid0, uid1;
   10889 	uint16_t nvm_data;
   10890 	uint16_t off;
   10891 	bool check_version = false;
   10892 	bool check_optionrom = false;
   10893 	bool have_build = false;
   10894 
   10895 	/*
   10896 	 * Version format:
   10897 	 *
   10898 	 * XYYZ
   10899 	 * X0YZ
   10900 	 * X0YY
   10901 	 *
   10902 	 * Example:
   10903 	 *
   10904 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10905 	 *	82571	0x50a6	5.10.6?
   10906 	 *	82572	0x506a	5.6.10?
   10907 	 *	82572EI	0x5069	5.6.9?
   10908 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10909 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10911 	 */
   10912 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10913 	switch (sc->sc_type) {
   10914 	case WM_T_82571:
   10915 	case WM_T_82572:
   10916 	case WM_T_82574:
   10917 	case WM_T_82583:
   10918 		check_version = true;
   10919 		check_optionrom = true;
   10920 		have_build = true;
   10921 		break;
   10922 	case WM_T_82575:
   10923 	case WM_T_82576:
   10924 	case WM_T_82580:
   10925 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10926 			check_version = true;
   10927 		break;
   10928 	case WM_T_I211:
   10929 		wm_nvm_version_invm(sc);
   10930 		goto printver;
   10931 	case WM_T_I210:
   10932 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10933 			wm_nvm_version_invm(sc);
   10934 			goto printver;
   10935 		}
   10936 		/* FALLTHROUGH */
   10937 	case WM_T_I350:
   10938 	case WM_T_I354:
   10939 		check_version = true;
   10940 		check_optionrom = true;
   10941 		break;
   10942 	default:
   10943 		return;
   10944 	}
   10945 	if (check_version) {
   10946 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10947 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10948 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10949 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10950 			build = nvm_data & NVM_BUILD_MASK;
   10951 			have_build = true;
   10952 		} else
   10953 			minor = nvm_data & 0x00ff;
   10954 
   10955 		/* Decimal */
   10956 		minor = (minor / 16) * 10 + (minor % 16);
   10957 		sc->sc_nvm_ver_major = major;
   10958 		sc->sc_nvm_ver_minor = minor;
   10959 
   10960 printver:
   10961 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10962 		    sc->sc_nvm_ver_minor);
   10963 		if (have_build) {
   10964 			sc->sc_nvm_ver_build = build;
   10965 			aprint_verbose(".%d", build);
   10966 		}
   10967 	}
   10968 	if (check_optionrom) {
   10969 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10970 		/* Option ROM Version */
   10971 		if ((off != 0x0000) && (off != 0xffff)) {
   10972 			off += NVM_COMBO_VER_OFF;
   10973 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10974 			wm_nvm_read(sc, off, 1, &uid0);
   10975 			if ((uid0 != 0) && (uid0 != 0xffff)
   10976 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10977 				/* 16bits */
   10978 				major = uid0 >> 8;
   10979 				build = (uid0 << 8) | (uid1 >> 8);
   10980 				patch = uid1 & 0x00ff;
   10981 				aprint_verbose(", option ROM Version %d.%d.%d",
   10982 				    major, build, patch);
   10983 			}
   10984 		}
   10985 	}
   10986 
   10987 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10988 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10989 }
   10990 
   10991 /*
   10992  * wm_nvm_read:
   10993  *
   10994  *	Read data from the serial EEPROM.
   10995  */
   10996 static int
   10997 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10998 {
   10999 	int rv;
   11000 
   11001 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11002 		return 1;
   11003 
   11004 	if (wm_nvm_acquire(sc))
   11005 		return 1;
   11006 
   11007 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11008 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11009 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11010 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11011 	else if (sc->sc_type == WM_T_PCH_SPT)
   11012 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11013 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11014 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11015 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11016 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11017 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11018 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11019 	else
   11020 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11021 
   11022 	wm_nvm_release(sc);
   11023 	return rv;
   11024 }
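
/*
 * Usage sketch (illustrative only): reading the three words of the
 * Ethernet address from NVM.
 *
 *	uint16_t myea[3];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, __arraycount(myea), myea))
 *		aprint_error_dev(sc->sc_dev, "could not read MAC address\n");
 */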
   11025 
   11026 /*
   11027  * Hardware semaphores.
 * Very complex...
   11029  */
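
/*
 * Three mechanisms are layered below, selected by sc_flags:
 *
 *	WM_F_LOCK_SWSM:   the SWSM SMBI and SWESMBI bits;
 *	WM_F_LOCK_SWFW:   per-resource bits in SW_FW_SYNC (on top of SWSM);
 *	WM_F_LOCK_EXTCNF: the EXTCNFCTR MDIO software ownership bit.
 */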
   11030 
   11031 static int
   11032 wm_get_swsm_semaphore(struct wm_softc *sc)
   11033 {
   11034 	int32_t timeout;
   11035 	uint32_t swsm;
   11036 
   11037 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11038 		/* Get the SW semaphore. */
   11039 		timeout = sc->sc_nvm_wordsize + 1;
   11040 		while (timeout) {
   11041 			swsm = CSR_READ(sc, WMREG_SWSM);
   11042 
   11043 			if ((swsm & SWSM_SMBI) == 0)
   11044 				break;
   11045 
   11046 			delay(50);
   11047 			timeout--;
   11048 		}
   11049 
   11050 		if (timeout == 0) {
   11051 			aprint_error_dev(sc->sc_dev,
   11052 			    "could not acquire SWSM SMBI\n");
   11053 			return 1;
   11054 		}
   11055 	}
   11056 
   11057 	/* Get the FW semaphore. */
   11058 	timeout = sc->sc_nvm_wordsize + 1;
   11059 	while (timeout) {
   11060 		swsm = CSR_READ(sc, WMREG_SWSM);
   11061 		swsm |= SWSM_SWESMBI;
   11062 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11063 		/* If we managed to set the bit we got the semaphore. */
   11064 		swsm = CSR_READ(sc, WMREG_SWSM);
   11065 		if (swsm & SWSM_SWESMBI)
   11066 			break;
   11067 
   11068 		delay(50);
   11069 		timeout--;
   11070 	}
   11071 
   11072 	if (timeout == 0) {
   11073 		aprint_error_dev(sc->sc_dev,
   11074 		    "could not acquire SWSM SWESMBI\n");
   11075 		/* Release semaphores */
   11076 		wm_put_swsm_semaphore(sc);
   11077 		return 1;
   11078 	}
   11079 	return 0;
   11080 }
   11081 
   11082 static void
   11083 wm_put_swsm_semaphore(struct wm_softc *sc)
   11084 {
   11085 	uint32_t swsm;
   11086 
   11087 	swsm = CSR_READ(sc, WMREG_SWSM);
   11088 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11089 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11090 }
   11091 
   11092 static int
   11093 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11094 {
   11095 	uint32_t swfw_sync;
   11096 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11097 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
   11099 
   11100 	for (timeout = 0; timeout < 200; timeout++) {
   11101 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11102 			if (wm_get_swsm_semaphore(sc)) {
   11103 				aprint_error_dev(sc->sc_dev,
   11104 				    "%s: failed to get semaphore\n",
   11105 				    __func__);
   11106 				return 1;
   11107 			}
   11108 		}
   11109 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11110 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11111 			swfw_sync |= swmask;
   11112 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11113 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11114 				wm_put_swsm_semaphore(sc);
   11115 			return 0;
   11116 		}
   11117 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11118 			wm_put_swsm_semaphore(sc);
   11119 		delay(5000);
   11120 	}
   11121 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11122 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11123 	return 1;
   11124 }
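
/*
 * Typical usage (sketch), as in wm_nvm_acquire() above: bracket access
 * to a shared resource with get/put on that resource's mask bit.
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM))
 *		return 1;
 *	... access the EEPROM ...
 *	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 */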
   11125 
   11126 static void
   11127 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11128 {
   11129 	uint32_t swfw_sync;
   11130 
   11131 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11132 		while (wm_get_swsm_semaphore(sc) != 0)
   11133 			continue;
   11134 	}
   11135 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11136 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11137 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11138 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11139 		wm_put_swsm_semaphore(sc);
   11140 }
   11141 
   11142 static int
   11143 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11144 {
   11145 	uint32_t ext_ctrl;
	int timeout;
   11147 
   11148 	for (timeout = 0; timeout < 200; timeout++) {
   11149 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11150 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11151 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11152 
   11153 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11154 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11155 			return 0;
   11156 		delay(5000);
   11157 	}
   11158 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11159 	    device_xname(sc->sc_dev), ext_ctrl);
   11160 	return 1;
   11161 }
   11162 
   11163 static void
   11164 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11165 {
   11166 	uint32_t ext_ctrl;
   11167 
   11168 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11169 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11170 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11171 }
   11172 
   11173 static int
   11174 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11175 {
   11176 	int i = 0;
   11177 	uint32_t reg;
   11178 
   11179 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11180 	do {
   11181 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11182 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11183 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11184 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11185 			break;
   11186 		delay(2*1000);
   11187 		i++;
   11188 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11189 
   11190 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11191 		wm_put_hw_semaphore_82573(sc);
   11192 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11193 		    device_xname(sc->sc_dev));
   11194 		return -1;
   11195 	}
   11196 
   11197 	return 0;
   11198 }
   11199 
   11200 static void
   11201 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11202 {
   11203 	uint32_t reg;
   11204 
   11205 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11206 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11207 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11208 }
   11209 
   11210 /*
   11211  * Management mode and power management related subroutines.
   11212  * BMC, AMT, suspend/resume and EEE.
   11213  */
   11214 
   11215 #ifdef WM_WOL
   11216 static int
   11217 wm_check_mng_mode(struct wm_softc *sc)
   11218 {
   11219 	int rv;
   11220 
   11221 	switch (sc->sc_type) {
   11222 	case WM_T_ICH8:
   11223 	case WM_T_ICH9:
   11224 	case WM_T_ICH10:
   11225 	case WM_T_PCH:
   11226 	case WM_T_PCH2:
   11227 	case WM_T_PCH_LPT:
   11228 	case WM_T_PCH_SPT:
   11229 		rv = wm_check_mng_mode_ich8lan(sc);
   11230 		break;
   11231 	case WM_T_82574:
   11232 	case WM_T_82583:
   11233 		rv = wm_check_mng_mode_82574(sc);
   11234 		break;
   11235 	case WM_T_82571:
   11236 	case WM_T_82572:
   11237 	case WM_T_82573:
   11238 	case WM_T_80003:
   11239 		rv = wm_check_mng_mode_generic(sc);
   11240 		break;
   11241 	default:
		/* Nothing to do */
   11243 		rv = 0;
   11244 		break;
   11245 	}
   11246 
   11247 	return rv;
   11248 }
   11249 
   11250 static int
   11251 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11252 {
   11253 	uint32_t fwsm;
   11254 
   11255 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11256 
   11257 	if (((fwsm & FWSM_FW_VALID) != 0)
   11258 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11259 		return 1;
   11260 
   11261 	return 0;
   11262 }
   11263 
   11264 static int
   11265 wm_check_mng_mode_82574(struct wm_softc *sc)
   11266 {
   11267 	uint16_t data;
   11268 
   11269 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11270 
   11271 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11272 		return 1;
   11273 
   11274 	return 0;
   11275 }
   11276 
   11277 static int
   11278 wm_check_mng_mode_generic(struct wm_softc *sc)
   11279 {
   11280 	uint32_t fwsm;
   11281 
   11282 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11283 
   11284 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11285 		return 1;
   11286 
   11287 	return 0;
   11288 }
   11289 #endif /* WM_WOL */
   11290 
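/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management packets should be passed through to the
 *	host: TCO receive must be enabled, and either the firmware must
 *	be in a pass-through mode or SMBus must be enabled with ASF
 *	disabled.  Return 0 otherwise.
 */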
   11291 static int
   11292 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11293 {
   11294 	uint32_t manc, fwsm, factps;
   11295 
   11296 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11297 		return 0;
   11298 
   11299 	manc = CSR_READ(sc, WMREG_MANC);
   11300 
   11301 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11302 		device_xname(sc->sc_dev), manc));
   11303 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11304 		return 0;
   11305 
   11306 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11307 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11308 		factps = CSR_READ(sc, WMREG_FACTPS);
   11309 		if (((factps & FACTPS_MNGCG) == 0)
   11310 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11311 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11313 		uint16_t data;
   11314 
   11315 		factps = CSR_READ(sc, WMREG_FACTPS);
   11316 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11317 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11318 			device_xname(sc->sc_dev), factps, data));
   11319 		if (((factps & FACTPS_MNGCG) == 0)
   11320 		    && ((data & NVM_CFG2_MNGM_MASK)
   11321 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11322 			return 1;
   11323 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11324 	    && ((manc & MANC_ASF_EN) == 0))
   11325 		return 1;
   11326 
   11327 	return 0;
   11328 }
   11329 
   11330 static bool
   11331 wm_phy_resetisblocked(struct wm_softc *sc)
   11332 {
   11333 	bool blocked = false;
   11334 	uint32_t reg;
   11335 	int i = 0;
   11336 
   11337 	switch (sc->sc_type) {
   11338 	case WM_T_ICH8:
   11339 	case WM_T_ICH9:
   11340 	case WM_T_ICH10:
   11341 	case WM_T_PCH:
   11342 	case WM_T_PCH2:
   11343 	case WM_T_PCH_LPT:
   11344 	case WM_T_PCH_SPT:
   11345 		do {
   11346 			reg = CSR_READ(sc, WMREG_FWSM);
   11347 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11348 				blocked = true;
   11349 				delay(10*1000);
   11350 				continue;
   11351 			}
   11352 			blocked = false;
   11353 		} while (blocked && (i++ < 10));
   11354 		return blocked;
   11356 	case WM_T_82571:
   11357 	case WM_T_82572:
   11358 	case WM_T_82573:
   11359 	case WM_T_82574:
   11360 	case WM_T_82583:
   11361 	case WM_T_80003:
   11362 		reg = CSR_READ(sc, WMREG_MANC);
   11363 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11364 			return true;
   11365 		else
   11366 			return false;
   11368 	default:
   11369 		/* no problem */
   11370 		break;
   11371 	}
   11372 
   11373 	return false;
   11374 }
   11375 
   11376 static void
   11377 wm_get_hw_control(struct wm_softc *sc)
   11378 {
   11379 	uint32_t reg;
   11380 
   11381 	switch (sc->sc_type) {
   11382 	case WM_T_82573:
   11383 		reg = CSR_READ(sc, WMREG_SWSM);
   11384 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11385 		break;
   11386 	case WM_T_82571:
   11387 	case WM_T_82572:
   11388 	case WM_T_82574:
   11389 	case WM_T_82583:
   11390 	case WM_T_80003:
   11391 	case WM_T_ICH8:
   11392 	case WM_T_ICH9:
   11393 	case WM_T_ICH10:
   11394 	case WM_T_PCH:
   11395 	case WM_T_PCH2:
   11396 	case WM_T_PCH_LPT:
   11397 	case WM_T_PCH_SPT:
   11398 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11399 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11400 		break;
   11401 	default:
   11402 		break;
   11403 	}
   11404 }
   11405 
   11406 static void
   11407 wm_release_hw_control(struct wm_softc *sc)
   11408 {
   11409 	uint32_t reg;
   11410 
   11411 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11412 		return;
   11413 
   11414 	if (sc->sc_type == WM_T_82573) {
   11415 		reg = CSR_READ(sc, WMREG_SWSM);
   11416 		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
   11418 	} else {
   11419 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11420 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11421 	}
   11422 }
   11423 
   11424 static void
   11425 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11426 {
   11427 	uint32_t reg;
   11428 
   11429 	if (sc->sc_type < WM_T_PCH2)
   11430 		return;
   11431 
   11432 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11433 
   11434 	if (gate)
   11435 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11436 	else
   11437 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11438 
   11439 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11440 }
   11441 
   11442 static void
   11443 wm_smbustopci(struct wm_softc *sc)
   11444 {
   11445 	uint32_t fwsm, reg;
   11446 
   11447 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11448 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11449 
   11450 	/* Acquire semaphore */
   11451 	wm_get_swfwhw_semaphore(sc);
   11452 
   11453 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11454 	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_phy_resetisblocked(sc) == false)) {
   11456 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11457 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11458 			reg |= CTRL_EXT_FORCE_SMBUS;
   11459 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11460 			CSR_WRITE_FLUSH(sc);
   11461 			delay(50*1000);
   11462 		}
   11463 
		/* Toggle LANPHYPC to move the PHY from SMBus to PCIe mode */
   11465 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11466 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11467 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11468 		CSR_WRITE_FLUSH(sc);
   11469 		delay(10);
   11470 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11471 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11472 		CSR_WRITE_FLUSH(sc);
   11473 		delay(50*1000);
   11474 
   11475 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11476 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11477 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11478 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11479 		}
   11480 	}
   11481 
   11482 	/* Release semaphore */
   11483 	wm_put_swfwhw_semaphore(sc);
   11484 
   11485 	/*
   11486 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11487 	 */
   11488 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11489 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11490 }
   11491 
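/*
 * With manageability present, route packets addressed to the
 * management ports (the MANC2H_PORT_* bits) up to the host instead of
 * letting the firmware consume them, and disable the firmware's ARP
 * interception so the host network stack answers ARP itself.
 */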
   11492 static void
   11493 wm_init_manageability(struct wm_softc *sc)
   11494 {
   11495 
   11496 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11497 		device_xname(sc->sc_dev), __func__));
   11498 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11499 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11500 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11501 
   11502 		/* Disable hardware interception of ARP */
   11503 		manc &= ~MANC_ARP_EN;
   11504 
   11505 		/* Enable receiving management packets to the host */
   11506 		if (sc->sc_type >= WM_T_82571) {
   11507 			manc |= MANC_EN_MNG2HOST;
    11508 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11509 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11510 		}
   11511 
   11512 		CSR_WRITE(sc, WMREG_MANC, manc);
   11513 	}
   11514 }
   11515 
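/*
 * Undo wm_init_manageability for detach: give ARP handling (and, on
 * 82571 and later, the management packets) back to the firmware.
 */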
   11516 static void
   11517 wm_release_manageability(struct wm_softc *sc)
   11518 {
   11519 
   11520 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11521 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11522 
   11523 		manc |= MANC_ARP_EN;
   11524 		if (sc->sc_type >= WM_T_82571)
   11525 			manc &= ~MANC_EN_MNG2HOST;
   11526 
   11527 		CSR_WRITE(sc, WMREG_MANC, manc);
   11528 	}
   11529 }
   11530 
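/*
 * Detect the chip's firmware features (AMT, ASF firmware, ARC
 * subsystem, manageability pass-through) and record them in sc_flags;
 * they decide how hardware control and wakeup are handled later.
 */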
   11531 static void
   11532 wm_get_wakeup(struct wm_softc *sc)
   11533 {
   11534 
   11535 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11536 	switch (sc->sc_type) {
   11537 	case WM_T_82573:
   11538 	case WM_T_82583:
   11539 		sc->sc_flags |= WM_F_HAS_AMT;
   11540 		/* FALLTHROUGH */
   11541 	case WM_T_80003:
   11542 	case WM_T_82541:
   11543 	case WM_T_82547:
   11544 	case WM_T_82571:
   11545 	case WM_T_82572:
   11546 	case WM_T_82574:
   11547 	case WM_T_82575:
   11548 	case WM_T_82576:
   11549 	case WM_T_82580:
   11550 	case WM_T_I350:
   11551 	case WM_T_I354:
   11552 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   11553 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11554 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11555 		break;
   11556 	case WM_T_ICH8:
   11557 	case WM_T_ICH9:
   11558 	case WM_T_ICH10:
   11559 	case WM_T_PCH:
   11560 	case WM_T_PCH2:
   11561 	case WM_T_PCH_LPT:
   11562 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   11563 		sc->sc_flags |= WM_F_HAS_AMT;
   11564 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11565 		break;
   11566 	default:
   11567 		break;
   11568 	}
   11569 
   11570 	/* 1: HAS_MANAGE */
   11571 	if (wm_enable_mng_pass_thru(sc) != 0)
   11572 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11573 
   11574 #ifdef WM_DEBUG
   11575 	printf("\n");
   11576 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11577 		printf("HAS_AMT,");
   11578 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11579 		printf("ARC_SUBSYS_VALID,");
   11580 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11581 		printf("ASF_FIRMWARE_PRES,");
   11582 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11583 		printf("HAS_MANAGE,");
   11584 	printf("\n");
   11585 #endif
   11586 	/*
    11587 	 * Note that the WOL flags are set later, after the EEPROM/NVM
    11588 	 * reset handling has run.
   11589 	 */
   11590 }
   11591 
   11592 #ifdef WM_WOL
   11593 /* WOL in the newer chipset interfaces (pchlan) */
   11594 static void
   11595 wm_enable_phy_wakeup(struct wm_softc *sc)
   11596 {
   11597 #if 0
   11598 	uint16_t preg;
   11599 
   11600 	/* Copy MAC RARs to PHY RARs */
   11601 
   11602 	/* Copy MAC MTA to PHY MTA */
   11603 
   11604 	/* Configure PHY Rx Control register */
   11605 
   11606 	/* Enable PHY wakeup in MAC register */
   11607 
   11608 	/* Configure and enable PHY wakeup in PHY registers */
   11609 
   11610 	/* Activate PHY wakeup */
   11611 
   11612 	/* XXX */
   11613 #endif
   11614 }
   11615 
   11616 /* Power down workaround on D3 */
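/*
 * Force the IGP3 voltage regulator into shutdown mode before entering
 * D3: try at most twice, issuing a PHY reset between the attempts if
 * the first write did not stick.
 */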
   11617 static void
   11618 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11619 {
   11620 	uint32_t reg;
   11621 	int i;
   11622 
   11623 	for (i = 0; i < 2; i++) {
   11624 		/* Disable link */
   11625 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11626 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11627 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11628 
   11629 		/*
   11630 		 * Call gig speed drop workaround on Gig disable before
   11631 		 * accessing any PHY registers
   11632 		 */
   11633 		if (sc->sc_type == WM_T_ICH8)
   11634 			wm_gig_downshift_workaround_ich8lan(sc);
   11635 
   11636 		/* Write VR power-down enable */
   11637 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11638 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11639 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11640 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11641 
   11642 		/* Read it back and test */
   11643 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11644 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11645 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11646 			break;
   11647 
   11648 		/* Issue PHY reset and repeat at most one more time */
   11649 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11650 	}
   11651 }
   11652 
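/*
 * Arm the chip for wake-on-LAN: program the wakeup control and filter
 * registers (magic packet, at least), apply the ICH/PCH quirks, and
 * set PME_EN in the PCI power management capability so a wake event
 * actually asserts PME#.
 */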
   11653 static void
   11654 wm_enable_wakeup(struct wm_softc *sc)
   11655 {
   11656 	uint32_t reg, pmreg;
   11657 	pcireg_t pmode;
   11658 
   11659 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11660 		&pmreg, NULL) == 0)
   11661 		return;
   11662 
   11663 	/* Advertise the wakeup capability */
   11664 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11665 	    | CTRL_SWDPIN(3));
   11666 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11667 
   11668 	/* ICH workaround */
   11669 	switch (sc->sc_type) {
   11670 	case WM_T_ICH8:
   11671 	case WM_T_ICH9:
   11672 	case WM_T_ICH10:
   11673 	case WM_T_PCH:
   11674 	case WM_T_PCH2:
   11675 	case WM_T_PCH_LPT:
   11676 	case WM_T_PCH_SPT:
   11677 		/* Disable gig during WOL */
   11678 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11679 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11680 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11681 		if (sc->sc_type == WM_T_PCH)
   11682 			wm_gmii_reset(sc);
   11683 
   11684 		/* Power down workaround */
   11685 		if (sc->sc_phytype == WMPHY_82577) {
   11686 			struct mii_softc *child;
   11687 
   11688 			/* Assume that the PHY is copper */
   11689 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
    11690 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   11691 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11692 				    (768 << 5) | 25, 0x0444); /* magic num */
   11693 		}
   11694 		break;
   11695 	default:
   11696 		break;
   11697 	}
   11698 
   11699 	/* Keep the laser running on fiber adapters */
   11700 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11701 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11702 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11703 		reg |= CTRL_EXT_SWDPIN(3);
   11704 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11705 	}
   11706 
   11707 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11708 #if 0	/* for the multicast packet */
   11709 	reg |= WUFC_MC;
   11710 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11711 #endif
   11712 
   11713 	if (sc->sc_type == WM_T_PCH) {
   11714 		wm_enable_phy_wakeup(sc);
   11715 	} else {
   11716 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11717 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11718 	}
   11719 
   11720 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11721 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11722 		|| (sc->sc_type == WM_T_PCH2))
   11723 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11724 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11725 
   11726 	/* Request PME */
   11727 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11728 #if 0
   11729 	/* Disable WOL */
   11730 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11731 #else
   11732 	/* For WOL */
   11733 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11734 #endif
   11735 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11736 }
   11737 #endif /* WM_WOL */
   11738 
   11739 /* LPLU */
   11740 
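/*
 * LPLU (Low Power Link Up) lets the PHY negotiate the lowest possible
 * speed to save power. That is undesirable in D0 where performance
 * matters, so these disable it: via the MAC's PHY_CTRL register on
 * most chips, via the PHY's HV_OEM_BITS register on PCH.
 */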
   11741 static void
   11742 wm_lplu_d0_disable(struct wm_softc *sc)
   11743 {
   11744 	uint32_t reg;
   11745 
   11746 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11747 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   11748 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11749 }
   11750 
   11751 static void
   11752 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   11753 {
   11754 	uint32_t reg;
   11755 
   11756 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   11757 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   11758 	reg |= HV_OEM_BITS_ANEGNOW;
   11759 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   11760 }
   11761 
   11762 /* EEE */
   11763 
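/*
 * EEE (Energy Efficient Ethernet, 802.3az) lets both link partners
 * drop into a Low Power Idle state between packets. IPCNFG controls
 * the per-speed EEE advertisement and EEER enables LPI in the MAC's
 * transmit and receive paths.
 */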
   11764 static void
   11765 wm_set_eee_i350(struct wm_softc *sc)
   11766 {
   11767 	uint32_t ipcnfg, eeer;
   11768 
   11769 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11770 	eeer = CSR_READ(sc, WMREG_EEER);
   11771 
   11772 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11773 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11774 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11775 		    | EEER_LPI_FC);
   11776 	} else {
   11777 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11778 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11779 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11780 		    | EEER_LPI_FC);
   11781 	}
   11782 
   11783 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11784 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11785 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11786 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11787 }
   11788 
   11789 /*
   11790  * Workarounds (mainly PHY related).
    11791  * Basically, workarounds for the PHYs live in the PHY drivers.
   11792  */
   11793 
   11794 /* Work-around for 82566 Kumeran PCS lock loss */
   11795 static void
   11796 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11797 {
   11798 #if 0
   11799 	int miistatus, active, i;
   11800 	int reg;
   11801 
   11802 	miistatus = sc->sc_mii.mii_media_status;
   11803 
   11804 	/* If the link is not up, do nothing */
   11805 	if ((miistatus & IFM_ACTIVE) == 0)
   11806 		return;
   11807 
   11808 	active = sc->sc_mii.mii_media_active;
   11809 
   11810 	/* Nothing to do if the link is other than 1Gbps */
   11811 	if (IFM_SUBTYPE(active) != IFM_1000_T)
    11812 	/* Nothing to do if the link speed is anything other than 1Gbps */
   11813 
   11814 	for (i = 0; i < 10; i++) {
   11815 		/* read twice */
   11816 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11817 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11818 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11819 			goto out;	/* GOOD! */
   11820 
   11821 		/* Reset the PHY */
   11822 		wm_gmii_reset(sc);
   11823 		delay(5*1000);
   11824 	}
   11825 
   11826 	/* Disable GigE link negotiation */
   11827 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11828 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11829 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11830 
   11831 	/*
   11832 	 * Call gig speed drop workaround on Gig disable before accessing
   11833 	 * any PHY registers.
   11834 	 */
   11835 	wm_gig_downshift_workaround_ich8lan(sc);
   11836 
   11837 out:
   11838 	return;
   11839 #endif
   11840 }
   11841 
   11842 /* WOL from S5 stops working */
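/*
 * Setting and then clearing the Kumeran near-end loopback diagnostic
 * bit apparently kicks the IGP3 PHY out of the state that breaks WOL
 * from S5 after a gigabit downshift.
 */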
   11843 static void
   11844 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11845 {
   11846 	uint16_t kmrn_reg;
   11847 
   11848 	/* Only for igp3 */
   11849 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11850 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11851 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11852 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11853 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11854 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11855 	}
   11856 }
   11857 
   11858 /*
   11859  * Workaround for pch's PHYs
   11860  * XXX should be moved to new PHY driver?
   11861  */
   11862 static void
   11863 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   11864 {
   11865 	if (sc->sc_phytype == WMPHY_82577)
   11866 		wm_set_mdio_slow_mode_hv(sc);
   11867 
   11868 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   11869 
    11870 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   11871 
   11872 	/* 82578 */
   11873 	if (sc->sc_phytype == WMPHY_82578) {
   11874 		/* PCH rev. < 3 */
   11875 		if (sc->sc_rev < 3) {
   11876 			/* XXX 6 bit shift? Why? Is it page2? */
   11877 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   11878 			    0x66c0);
   11879 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   11880 			    0xffff);
   11881 		}
   11882 
   11883 		/* XXX phy rev. < 2 */
   11884 	}
   11885 
   11886 	/* Select page 0 */
   11887 
   11888 	/* XXX acquire semaphore */
   11889 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   11890 	/* XXX release semaphore */
   11891 
   11892 	/*
   11893 	 * Configure the K1 Si workaround during phy reset assuming there is
   11894 	 * link so that it disables K1 if link is in 1Gbps.
   11895 	 */
   11896 	wm_k1_gig_workaround_hv(sc, 1);
   11897 }
   11898 
   11899 static void
   11900 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   11901 {
   11902 
   11903 	wm_set_mdio_slow_mode_hv(sc);
   11904 }
   11905 
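/*
 * K1 is a power-save state of the Kumeran interconnect between MAC and
 * PHY, and it must not be used while a 1Gbps link is up. Disable it
 * when link comes up at gig speed and restore the NVM default
 * otherwise.
 */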
   11906 static void
   11907 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   11908 {
   11909 	int k1_enable = sc->sc_nvm_k1_enabled;
   11910 
   11911 	/* XXX acquire semaphore */
   11912 
   11913 	if (link) {
   11914 		k1_enable = 0;
   11915 
   11916 		/* Link stall fix for link up */
   11917 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   11918 	} else {
   11919 		/* Link stall fix for link down */
   11920 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   11921 	}
   11922 
   11923 	wm_configure_k1_ich8lan(sc, k1_enable);
   11924 
   11925 	/* XXX release semaphore */
   11926 }
   11927 
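/*
 * Some PCH PHY register accesses only work reliably with the MDIO bus
 * clocked at the slower rate; HV_KMRN_MDIO_SLOW selects that mode.
 */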
   11928 static void
   11929 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   11930 {
   11931 	uint32_t reg;
   11932 
   11933 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   11934 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   11935 	    reg | HV_KMRN_MDIO_SLOW);
   11936 }
   11937 
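/*
 * Changing K1 requires the MAC to run at a forced, fixed speed while
 * the Kumeran K1_CONFIG register is written, hence the temporary
 * CTRL_FRCSPD / CTRL_EXT_SPD_BYPS settings below.
 */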
   11938 static void
   11939 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   11940 {
   11941 	uint32_t ctrl, ctrl_ext, tmp;
   11942 	uint16_t kmrn_reg;
   11943 
   11944 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   11945 
   11946 	if (k1_enable)
   11947 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   11948 	else
   11949 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   11950 
   11951 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   11952 
   11953 	delay(20);
   11954 
   11955 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11956 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11957 
   11958 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   11959 	tmp |= CTRL_FRCSPD;
   11960 
   11961 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   11962 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   11963 	CSR_WRITE_FLUSH(sc);
   11964 	delay(20);
   11965 
   11966 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   11967 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11968 	CSR_WRITE_FLUSH(sc);
   11969 	delay(20);
   11970 }
   11971 
    11972 /* Special case: the 82575 needs to do manual init after a reset ... */
   11973 static void
   11974 wm_reset_init_script_82575(struct wm_softc *sc)
   11975 {
   11976 	/*
    11977 	 * Remark: this is untested code - we have no board without EEPROM.
    11978 	 * The setup is the same as in the FreeBSD driver for the i82575.
   11979 	 */
   11980 
   11981 	/* SerDes configuration via SERDESCTRL */
   11982 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   11983 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   11984 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   11985 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   11986 
   11987 	/* CCM configuration via CCMCTL register */
   11988 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   11989 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   11990 
   11991 	/* PCIe lanes configuration */
   11992 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   11993 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   11994 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   11995 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   11996 
   11997 	/* PCIe PLL Configuration */
   11998 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   11999 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12000 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12001 }
   12002 
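/*
 * A device reset clears the external/shared MDIO selection in MDICNFG
 * on the 82580, so for SGMII setups restore those bits from the
 * per-port NVM word.
 */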
   12003 static void
   12004 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12005 {
   12006 	uint32_t reg;
   12007 	uint16_t nvmword;
   12008 	int rv;
   12009 
   12010 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12011 		return;
   12012 
   12013 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12014 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12015 	if (rv != 0) {
   12016 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12017 		    __func__);
   12018 		return;
   12019 	}
   12020 
   12021 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12022 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12023 		reg |= MDICNFG_DEST;
   12024 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12025 		reg |= MDICNFG_COM_MDIO;
   12026 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12027 }
   12028 
   12029 /*
   12030  * I210 Errata 25 and I211 Errata 10
   12031  * Slow System Clock.
   12032  */
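/*
 * If the PLL has not locked, the PHY's PLL frequency register reads
 * back as unconfigured. Work around it by resetting the PHY with a
 * patched NVM autoload word and bouncing the function through D3hot
 * until the PHY comes up configured.
 */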
   12033 static void
   12034 wm_pll_workaround_i210(struct wm_softc *sc)
   12035 {
   12036 	uint32_t mdicnfg, wuc;
   12037 	uint32_t reg;
   12038 	pcireg_t pcireg;
   12039 	uint32_t pmreg;
   12040 	uint16_t nvmword, tmp_nvmword;
   12041 	int phyval;
   12042 	bool wa_done = false;
   12043 	int i;
   12044 
   12045 	/* Save WUC and MDICNFG registers */
   12046 	wuc = CSR_READ(sc, WMREG_WUC);
   12047 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12048 
   12049 	reg = mdicnfg & ~MDICNFG_DEST;
   12050 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12051 
   12052 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12053 		nvmword = INVM_DEFAULT_AL;
   12054 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12055 
   12056 	/* Get Power Management cap offset */
    12057 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
    12058 		&pmreg, NULL) == 0) {
		/* Restore the MDICNFG setting modified above before bailing */
		CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
    12059 		return;
	}
   12060 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12061 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12062 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12063 
   12064 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12065 			break; /* OK */
   12066 		}
   12067 
   12068 		wa_done = true;
   12069 		/* Directly reset the internal PHY */
   12070 		reg = CSR_READ(sc, WMREG_CTRL);
   12071 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12072 
   12073 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12074 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12075 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12076 
   12077 		CSR_WRITE(sc, WMREG_WUC, 0);
   12078 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12079 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12080 
   12081 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12082 		    pmreg + PCI_PMCSR);
   12083 		pcireg |= PCI_PMCSR_STATE_D3;
   12084 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12085 		    pmreg + PCI_PMCSR, pcireg);
   12086 		delay(1000);
   12087 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12088 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12089 		    pmreg + PCI_PMCSR, pcireg);
   12090 
   12091 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12092 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12093 
   12094 		/* Restore WUC register */
   12095 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12096 	}
   12097 
   12098 	/* Restore MDICNFG setting */
   12099 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12100 	if (wa_done)
   12101 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12102 }
   12103