/* if_wm.c, revision 1.414 (NetBSD sys/dev/pci) */
      1 /*	$NetBSD: if_wm.c,v 1.414 2016/06/14 17:09:20 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
     79  *	- EEE (Energy Efficiency Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  *	- restructure evcnt
     85  */
     86 
     87 #include <sys/cdefs.h>
     88 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.414 2016/06/14 17:09:20 knakahara Exp $");
     89 
     90 #ifdef _KERNEL_OPT
     91 #include "opt_net_mpsafe.h"
     92 #endif
     93 
     94 #include <sys/param.h>
     95 #include <sys/systm.h>
     96 #include <sys/callout.h>
     97 #include <sys/mbuf.h>
     98 #include <sys/malloc.h>
     99 #include <sys/kmem.h>
    100 #include <sys/kernel.h>
    101 #include <sys/socket.h>
    102 #include <sys/ioctl.h>
    103 #include <sys/errno.h>
    104 #include <sys/device.h>
    105 #include <sys/queue.h>
    106 #include <sys/syslog.h>
    107 #include <sys/interrupt.h>
    108 #include <sys/cpu.h>
    109 #include <sys/pcq.h>
    110 
    111 #include <sys/rndsource.h>
    112 
    113 #include <net/if.h>
    114 #include <net/if_dl.h>
    115 #include <net/if_media.h>
    116 #include <net/if_ether.h>
    117 
    118 #include <net/bpf.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/miivar.h>
    132 #include <dev/mii/miidevs.h>
    133 #include <dev/mii/mii_bitbang.h>
    134 #include <dev/mii/ikphyreg.h>
    135 #include <dev/mii/igphyreg.h>
    136 #include <dev/mii/igphyvar.h>
    137 #include <dev/mii/inbmphyreg.h>
    138 
    139 #include <dev/pci/pcireg.h>
    140 #include <dev/pci/pcivar.h>
    141 #include <dev/pci/pcidevs.h>
    142 
    143 #include <dev/pci/if_wmreg.h>
    144 #include <dev/pci/if_wmvar.h>
    145 
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
#define	WM_DEBUG_INIT		0x40
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT;

/*
 * DPRINTF(category, (fmt, ...)): print when the category bit is set in
 * wm_debug.  Wrapped in do/while(0) so the expansion is one statement
 * and cannot capture a following "else" (the bare-if form could).
 */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	do { } while (0)
#endif /* WM_DEBUG */
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
    167  * This device driver's max interrupt numbers.
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for normal
    204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    205  * packet.  We allocate 256 receive descriptors, each with a 2k
    206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    212 
/*
 * Tx descriptor ring storage.  The legacy (wiseman) and newqueue
 * descriptor layouts share one union, sized for the largest ring
 * (WM_NTXDESC_82544); only WM_NTXDESC(txq) entries are actually used.
 */
typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;
    217 
/*
 * Byte offset of descriptor (x) from the start of its ring.
 * Both macro arguments are fully parenthesized: the original
 * WM_CDRXOFF expanded "x" bare, so WM_CDRXOFF(a + b) evaluated as
 * sizeof(wiseman_rxdesc_t) * a + b.
 */
#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    220 
/*
 * Software state for transmit jobs: one per queued packet, recording
 * the mbuf chain, its DMA map, and the span of hardware descriptors
 * the packet occupies in the ring.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
    231 
/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together (see the
 * WM_RXCHAIN_* macros below).
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
    244 static uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
    250 
/*
 * 82580 RXPBS translation table, presumably consumed by
 * wm_rxpbs_adjust_82580() to map the RXPBS register field to a
 * packet-buffer size.  NOTE(review): units assumed to be KB -
 * confirm against the i82580 datasheet.
 */
static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
    254 
    255 struct wm_softc;
    256 
    257 struct wm_txqueue {
    258 	kmutex_t *txq_lock;		/* lock for tx operations */
    259 
    260 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    261 
    262 	/* Software state for the transmit descriptors. */
    263 	int txq_num;			/* must be a power of two */
    264 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    265 
    266 	/* TX control data structures. */
    267 	int txq_ndesc;			/* must be a power of two */
    268 	size_t txq_descsize;		/* a tx descriptor size */
    269 	txdescs_t *txq_descs_u;
    270         bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    271 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    272 	int txq_desc_rseg;		/* real number of control segment */
    273 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    274 #define	txq_descs	txq_descs_u->sctxu_txdescs
    275 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    276 
    277 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    278 
    279 	int txq_free;			/* number of free Tx descriptors */
    280 	int txq_next;			/* next ready Tx descriptor */
    281 
    282 	int txq_sfree;			/* number of free Tx jobs */
    283 	int txq_snext;			/* next free Tx job */
    284 	int txq_sdirty;			/* dirty Tx jobs */
    285 
    286 	/* These 4 variables are used only on the 82547. */
    287 	int txq_fifo_size;		/* Tx FIFO size */
    288 	int txq_fifo_head;		/* current head of FIFO */
    289 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    290 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    291 
    292 	/*
    293 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    294 	 * CPUs. This queue intermediate them without block.
    295 	 */
    296 	pcq_t *txq_interq;
    297 
    298 	/*
    299 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
    300 	 * to manage Tx H/W queue's busy flag.
    301 	 */
    302 	int txq_flags;			/* flags for H/W queue, see below */
    303 #define	WM_TXQ_NO_SPACE	0x1
    304 
    305 	/* XXX which event counter is required? */
    306 };
    307 
/*
 * Per-queue receive state: descriptor ring, per-buffer software
 * state, and the mbuf chain being reassembled for a multi-buffer
 * (jumbo) packet.
 */
struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;		/* NOTE(review): presumably "dropping
					 * rest of packet" flag - confirm */
	int rxq_len;			/* chain length (zeroed by
					 * WM_RXCHAIN_RESET) */
	struct mbuf *rxq_head;		/* head of chain (WM_RXCHAIN_*) */
	struct mbuf *rxq_tail;		/* last mbuf linked into chain */
	struct mbuf **rxq_tailp;	/* points at chain's NULL terminator */

	/* XXX which event counter is required? */
};
    335 
/* One transmit/receive queue pair plus its interrupt-table index. */
struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};
    343 
    344 /*
    345  * Software state per device.
    346  */
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	pcitag_t sc_pcitag;		/* PCI tag for this device */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;		/* NOTE(review): presumably set while
					 * the interface is stopping - confirm
					 * against wm_stop_locked() */

	/* NVM image version: major.minor.build */
	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;			/* number of entries in sc_queue[] */
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped(too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */
};
    472 
/*
 * Core-lock helpers: no-ops when sc_core_lock was never created
 * (the non-MPSAFE configuration).  The statement forms are wrapped
 * in do/while(0) so each expands to exactly one statement; the
 * original bare-if form could capture a following "else".
 * WM_CORE_LOCKED remains an expression for use in KASSERTs.
 */
#define WM_CORE_LOCK(_sc)						\
	do {								\
		if ((_sc)->sc_core_lock)				\
			mutex_enter((_sc)->sc_core_lock);		\
	} while (/*CONSTCOND*/0)
#define WM_CORE_UNLOCK(_sc)						\
	do {								\
		if ((_sc)->sc_core_lock)				\
			mutex_exit((_sc)->sc_core_lock);		\
	} while (/*CONSTCOND*/0)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    476 
    477 #ifdef WM_MPSAFE
    478 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    479 #else
    480 #define CALLOUT_FLAGS	0
    481 #endif
    482 
/*
 * Reset the rx mbuf chain under assembly: empty the list (head = NULL,
 * tail pointer back at the head) and zero the accumulated length.
 */
#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

/* Append mbuf (m) to the rx chain and advance the tail pointer. */
#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
    495 
/* Event-counter helpers; compile to nothing when counters are off. */
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif
    503 
/* Device CSR (memory-mapped register) access. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Flush posted writes by reading STATUS back. */
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

/* ICH8 flash register access; (reg) is offset by sc_flashreg_offset. */
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))
    524 
/* Bus (DMA) address of Tx/Rx descriptor (x) within its ring. */
#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

/*
 * Low/high 32-bit halves of a descriptor bus address, for programming
 * the 64-bit base-address register pairs.  The high half is 0 when
 * bus_addr_t is 32 bits wide.
 */
#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    537 
    538 /*
    539  * Register read/write functions.
    540  * Other than CSR_{READ|WRITE}().
    541  */
    542 #if 0
    543 static inline uint32_t wm_io_read(struct wm_softc *, int);
    544 #endif
    545 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    546 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    547 	uint32_t, uint32_t);
    548 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    549 
    550 /*
    551  * Descriptor sync/init functions.
    552  */
    553 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    554 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    555 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    556 
    557 /*
    558  * Device driver interface functions and commonly used functions.
    559  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    560  */
    561 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    562 static int	wm_match(device_t, cfdata_t, void *);
    563 static void	wm_attach(device_t, device_t, void *);
    564 static int	wm_detach(device_t, int);
    565 static bool	wm_suspend(device_t, const pmf_qual_t *);
    566 static bool	wm_resume(device_t, const pmf_qual_t *);
    567 static void	wm_watchdog(struct ifnet *);
    568 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    569 static void	wm_tick(void *);
    570 static int	wm_ifflags_cb(struct ethercom *);
    571 static int	wm_ioctl(struct ifnet *, u_long, void *);
    572 /* MAC address related */
    573 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    574 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    575 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    576 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    577 static void	wm_set_filter(struct wm_softc *);
    578 /* Reset and init related */
    579 static void	wm_set_vlan(struct wm_softc *);
    580 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    581 static void	wm_get_auto_rd_done(struct wm_softc *);
    582 static void	wm_lan_init_done(struct wm_softc *);
    583 static void	wm_get_cfg_done(struct wm_softc *);
    584 static void	wm_initialize_hardware_bits(struct wm_softc *);
    585 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    586 static void	wm_reset(struct wm_softc *);
    587 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    588 static void	wm_rxdrain(struct wm_rxqueue *);
    589 static void	wm_rss_getkey(uint8_t *);
    590 static void	wm_init_rss(struct wm_softc *);
    591 static void	wm_adjust_qnum(struct wm_softc *, int);
    592 static int	wm_setup_legacy(struct wm_softc *);
    593 static int	wm_setup_msix(struct wm_softc *);
    594 static int	wm_init(struct ifnet *);
    595 static int	wm_init_locked(struct ifnet *);
    596 static void	wm_stop(struct ifnet *, int);
    597 static void	wm_stop_locked(struct ifnet *, int);
    598 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    599 static void	wm_82547_txfifo_stall(void *);
    600 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    601 /* DMA related */
    602 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    603 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    604 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    605 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    606     struct wm_txqueue *);
    607 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    608 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    609 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    610     struct wm_rxqueue *);
    611 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    612 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    613 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    614 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    615 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    616 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    617 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    618     struct wm_txqueue *);
    619 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    620     struct wm_rxqueue *);
    621 static int	wm_alloc_txrx_queues(struct wm_softc *);
    622 static void	wm_free_txrx_queues(struct wm_softc *);
    623 static int	wm_init_txrx_queues(struct wm_softc *);
    624 /* Start */
    625 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    626     uint32_t *, uint8_t *);
    627 static void	wm_start(struct ifnet *);
    628 static void	wm_start_locked(struct ifnet *);
    629 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    630     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    631 static void	wm_nq_start(struct ifnet *);
    632 static void	wm_nq_start_locked(struct ifnet *);
    633 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    634 static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
    635 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    636 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    637 /* Interrupt */
    638 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    639 static void	wm_rxeof(struct wm_rxqueue *);
    640 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    641 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    642 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    643 static void	wm_linkintr(struct wm_softc *, uint32_t);
    644 static int	wm_intr_legacy(void *);
    645 static int	wm_txrxintr_msix(void *);
    646 static int	wm_linkintr_msix(void *);
    647 
    648 /*
    649  * Media related.
    650  * GMII, SGMII, TBI, SERDES and SFP.
    651  */
    652 /* Common */
    653 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    654 /* GMII related */
    655 static void	wm_gmii_reset(struct wm_softc *);
    656 static int	wm_get_phy_id_82575(struct wm_softc *);
    657 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    658 static int	wm_gmii_mediachange(struct ifnet *);
    659 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    660 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    661 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    662 static int	wm_gmii_i82543_readreg(device_t, int, int);
    663 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    664 static int	wm_gmii_i82544_readreg(device_t, int, int);
    665 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    666 static int	wm_gmii_i80003_readreg(device_t, int, int);
    667 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    668 static int	wm_gmii_bm_readreg(device_t, int, int);
    669 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    670 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    671 static int	wm_gmii_hv_readreg(device_t, int, int);
    672 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    673 static int	wm_gmii_82580_readreg(device_t, int, int);
    674 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    675 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    676 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    677 static void	wm_gmii_statchg(struct ifnet *);
    678 static int	wm_kmrn_readreg(struct wm_softc *, int);
    679 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    680 /* SGMII */
    681 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    682 static int	wm_sgmii_readreg(device_t, int, int);
    683 static void	wm_sgmii_writereg(device_t, int, int, int);
    684 /* TBI related */
    685 static void	wm_tbi_mediainit(struct wm_softc *);
    686 static int	wm_tbi_mediachange(struct ifnet *);
    687 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    688 static int	wm_check_for_link(struct wm_softc *);
    689 static void	wm_tbi_tick(struct wm_softc *);
    690 /* SERDES related */
    691 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    692 static int	wm_serdes_mediachange(struct ifnet *);
    693 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    694 static void	wm_serdes_tick(struct wm_softc *);
    695 /* SFP related */
    696 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    697 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    698 
    699 /*
    700  * NVM related.
    701  * Microwire, SPI (w/wo EERD) and Flash.
    702  */
    703 /* Misc functions */
    704 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    705 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    706 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    707 /* Microwire */
    708 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    709 /* SPI */
    710 static int	wm_nvm_ready_spi(struct wm_softc *);
    711 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    712 /* Using with EERD */
    713 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    714 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    715 /* Flash */
    716 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    717     unsigned int *);
    718 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    719 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    720 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    721 	uint32_t *);
    722 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    723 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    724 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    725 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    726 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    727 /* iNVM */
    728 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    729 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    730 /* Lock, detecting NVM type, validate checksum and read */
    731 static int	wm_nvm_acquire(struct wm_softc *);
    732 static void	wm_nvm_release(struct wm_softc *);
    733 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    734 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    735 static int	wm_nvm_validate_checksum(struct wm_softc *);
    736 static void	wm_nvm_version_invm(struct wm_softc *);
    737 static void	wm_nvm_version(struct wm_softc *);
    738 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    739 
    740 /*
    741  * Hardware semaphores.
    742  * Very complexed...
    743  */
    744 static int	wm_get_swsm_semaphore(struct wm_softc *);
    745 static void	wm_put_swsm_semaphore(struct wm_softc *);
    746 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    747 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    748 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    749 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    750 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    751 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    752 
    753 /*
    754  * Management mode and power management related subroutines.
    755  * BMC, AMT, suspend/resume and EEE.
    756  */
    757 #ifdef WM_WOL
    758 static int	wm_check_mng_mode(struct wm_softc *);
    759 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    760 static int	wm_check_mng_mode_82574(struct wm_softc *);
    761 static int	wm_check_mng_mode_generic(struct wm_softc *);
    762 #endif
    763 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    764 static bool	wm_phy_resetisblocked(struct wm_softc *);
    765 static void	wm_get_hw_control(struct wm_softc *);
    766 static void	wm_release_hw_control(struct wm_softc *);
    767 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    768 static void	wm_smbustopci(struct wm_softc *);
    769 static void	wm_init_manageability(struct wm_softc *);
    770 static void	wm_release_manageability(struct wm_softc *);
    771 static void	wm_get_wakeup(struct wm_softc *);
    772 #ifdef WM_WOL
    773 static void	wm_enable_phy_wakeup(struct wm_softc *);
    774 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    775 static void	wm_enable_wakeup(struct wm_softc *);
    776 #endif
    777 /* LPLU (Low Power Link Up) */
    778 static void	wm_lplu_d0_disable(struct wm_softc *);
    779 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    780 /* EEE */
    781 static void	wm_set_eee_i350(struct wm_softc *);
    782 
    783 /*
    784  * Workarounds (mainly PHY related).
    785  * Basically, PHY's workarounds are in the PHY drivers.
    786  */
    787 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    788 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    789 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    790 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    791 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    792 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    793 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    794 static void	wm_reset_init_script_82575(struct wm_softc *);
    795 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    796 static void	wm_pll_workaround_i210(struct wm_softc *);
    797 
/*
 * Autoconfiguration glue: register the "wm" driver with its match,
 * attach and detach entry points.  DVF_DETACH_SHUTDOWN allows the
 * device to be detached at system shutdown time.
 */
CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    800 
    801 /*
    802  * Devices supported by this driver.
    803  */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;	/* PCI vendor ID */
	pci_product_id_t	wmp_product;	/* PCI product ID */
	const char		*wmp_name;	/* device description string */
	wm_chip_type		wmp_type;	/* MAC type (WM_T_*) */
	uint32_t		wmp_flags;	/* default media type (low 2 bits) */
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  /* NOTE(review): description says 1000baseX but the media flag
	   * is WMP_F_COPPER -- verify this is intentional. */
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
#if 0	/* I219 (PCH_SPT) support not yet enabled */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
#endif
	/* Sentinel: wm_lookup() stops scanning at wmp_name == NULL. */
	{ 0,			0,
	  NULL,
	  0,			0 },
};
   1335 
#ifdef WM_EVENT_COUNTERS
/*
 * Storage for the per-Tx-segment event counter names ("txseg" plus a
 * number of up to three digits); presumably filled in at attach time
 * when the event counters are created -- the initializer is elsewhere.
 */
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
   1339 
   1340 
   1341 /*
   1342  * Register read/write functions.
   1343  * Other than CSR_{READ|WRITE}().
   1344  */
   1345 
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a device register through the I/O-space window: the
 *	register offset is written at I/O offset 0 (address window),
 *	then the value is read back from I/O offset 4 (data window).
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
   1355 
/*
 * wm_io_write:
 *
 *	Write a device register through the I/O-space window: the
 *	register offset goes to I/O offset 0 (address window), then the
 *	value to I/O offset 4 (data window).  The two bus_space writes
 *	must be issued in this order.
 */
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
   1363 
   1364 static inline void
   1365 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1366     uint32_t data)
   1367 {
   1368 	uint32_t regval;
   1369 	int i;
   1370 
   1371 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1372 
   1373 	CSR_WRITE(sc, reg, regval);
   1374 
   1375 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1376 		delay(5);
   1377 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1378 			break;
   1379 	}
   1380 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1381 		aprint_error("%s: WARNING:"
   1382 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1383 		    device_xname(sc->sc_dev), reg);
   1384 	}
   1385 }
   1386 
   1387 static inline void
   1388 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1389 {
   1390 	wa->wa_low = htole32(v & 0xffffffffU);
   1391 	if (sizeof(bus_addr_t) == 8)
   1392 		wa->wa_high = htole32((uint64_t) v >> 32);
   1393 	else
   1394 		wa->wa_high = 0;
   1395 }
   1396 
   1397 /*
   1398  * Descriptor sync/init functions.
   1399  */
   1400 static inline void
   1401 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1402 {
   1403 	struct wm_softc *sc = txq->txq_sc;
   1404 
   1405 	/* If it will wrap around, sync to the end of the ring. */
   1406 	if ((start + num) > WM_NTXDESC(txq)) {
   1407 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1408 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1409 		    (WM_NTXDESC(txq) - start), ops);
   1410 		num -= (WM_NTXDESC(txq) - start);
   1411 		start = 0;
   1412 	}
   1413 
   1414 	/* Now sync whatever is left. */
   1415 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1416 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1417 }
   1418 
   1419 static inline void
   1420 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1421 {
   1422 	struct wm_softc *sc = rxq->rxq_sc;
   1423 
   1424 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1425 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1426 }
   1427 
/*
 * wm_init_rxdesc:
 *
 *	(Re)initialize the receive descriptor at ring index 'start' to
 *	point at its mbuf's data buffer, sync it to the device, and hand
 *	it over by advancing the receive descriptor tail register.
 */
static inline void
wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
{
	struct wm_softc *sc = rxq->rxq_sc;
	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
	struct mbuf *m = rxs->rxs_mbuf;

	/*
	 * Note: We scoot the packet forward 2 bytes in the buffer
	 * so that the payload after the Ethernet header is aligned
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
	 * The stupid chip uses the same size for every buffer, which
	 * is set in the Receive Control register.  We are using the 2K
	 * size option, but what we REALLY want is (2K - 2)!  For this
	 * reason, we can't "scoot" packets longer than the standard
	 * Ethernet MTU.  On strict-alignment platforms, if the total
	 * size exceeds (2K - 2) we set align_tweak to 0 and let
	 * the upper layer copy the headers.
	 */
	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;

	/* Point the descriptor at the (possibly scooted) buffer. */
	wm_set_dma_addr(&rxd->wrx_addr,
	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
	/* Clear the remaining descriptor fields before reuse. */
	rxd->wrx_len = 0;
	rxd->wrx_cksum = 0;
	rxd->wrx_status = 0;
	rxd->wrx_errors = 0;
	rxd->wrx_special = 0;
	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Advance the tail register only after the descriptor is synced. */
	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
}
   1463 
   1464 /*
   1465  * Device driver interface functions and commonly used functions.
   1466  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1467  */
   1468 
   1469 /* Lookup supported device table */
   1470 static const struct wm_product *
   1471 wm_lookup(const struct pci_attach_args *pa)
   1472 {
   1473 	const struct wm_product *wmp;
   1474 
   1475 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1476 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1477 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1478 			return wmp;
   1479 	}
   1480 	return NULL;
   1481 }
   1482 
   1483 /* The match function (ca_match) */
   1484 static int
   1485 wm_match(device_t parent, cfdata_t cf, void *aux)
   1486 {
   1487 	struct pci_attach_args *pa = aux;
   1488 
   1489 	if (wm_lookup(pa) != NULL)
   1490 		return 1;
   1491 
   1492 	return 0;
   1493 }
   1494 
   1495 /* The attach function (ca_attach) */
   1496 static void
   1497 wm_attach(device_t parent, device_t self, void *aux)
   1498 {
   1499 	struct wm_softc *sc = device_private(self);
   1500 	struct pci_attach_args *pa = aux;
   1501 	prop_dictionary_t dict;
   1502 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1503 	pci_chipset_tag_t pc = pa->pa_pc;
   1504 	int counts[PCI_INTR_TYPE_SIZE];
   1505 	pci_intr_type_t max_type;
   1506 	const char *eetype, *xname;
   1507 	bus_space_tag_t memt;
   1508 	bus_space_handle_t memh;
   1509 	bus_size_t memsize;
   1510 	int memh_valid;
   1511 	int i, error;
   1512 	const struct wm_product *wmp;
   1513 	prop_data_t ea;
   1514 	prop_number_t pn;
   1515 	uint8_t enaddr[ETHER_ADDR_LEN];
   1516 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1517 	pcireg_t preg, memtype;
   1518 	uint16_t eeprom_data, apme_mask;
   1519 	bool force_clear_smbi;
   1520 	uint32_t link_mode;
   1521 	uint32_t reg;
   1522 
   1523 	sc->sc_dev = self;
   1524 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1525 	sc->sc_stopping = false;
   1526 
   1527 	wmp = wm_lookup(pa);
   1528 #ifdef DIAGNOSTIC
   1529 	if (wmp == NULL) {
   1530 		printf("\n");
   1531 		panic("wm_attach: impossible");
   1532 	}
   1533 #endif
   1534 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1535 
   1536 	sc->sc_pc = pa->pa_pc;
   1537 	sc->sc_pcitag = pa->pa_tag;
   1538 
   1539 	if (pci_dma64_available(pa))
   1540 		sc->sc_dmat = pa->pa_dmat64;
   1541 	else
   1542 		sc->sc_dmat = pa->pa_dmat;
   1543 
   1544 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1545 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1546 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1547 
   1548 	sc->sc_type = wmp->wmp_type;
   1549 	if (sc->sc_type < WM_T_82543) {
   1550 		if (sc->sc_rev < 2) {
   1551 			aprint_error_dev(sc->sc_dev,
   1552 			    "i82542 must be at least rev. 2\n");
   1553 			return;
   1554 		}
   1555 		if (sc->sc_rev < 3)
   1556 			sc->sc_type = WM_T_82542_2_0;
   1557 	}
   1558 
   1559 	/*
   1560 	 * Disable MSI for Errata:
   1561 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1562 	 *
   1563 	 *  82544: Errata 25
   1564 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1565 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1566 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1567 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1568 	 *
   1569 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1570 	 *
   1571 	 *  82571 & 82572: Errata 63
   1572 	 */
   1573 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1574 	    || (sc->sc_type == WM_T_82572))
   1575 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1576 
   1577 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1578 	    || (sc->sc_type == WM_T_82580)
   1579 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1580 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1581 		sc->sc_flags |= WM_F_NEWQUEUE;
   1582 
   1583 	/* Set device properties (mactype) */
   1584 	dict = device_properties(sc->sc_dev);
   1585 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1586 
   1587 	/*
   1588 	 * Map the device.  All devices support memory-mapped acccess,
   1589 	 * and it is really required for normal operation.
   1590 	 */
   1591 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1592 	switch (memtype) {
   1593 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1594 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1595 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1596 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1597 		break;
   1598 	default:
   1599 		memh_valid = 0;
   1600 		break;
   1601 	}
   1602 
   1603 	if (memh_valid) {
   1604 		sc->sc_st = memt;
   1605 		sc->sc_sh = memh;
   1606 		sc->sc_ss = memsize;
   1607 	} else {
   1608 		aprint_error_dev(sc->sc_dev,
   1609 		    "unable to map device registers\n");
   1610 		return;
   1611 	}
   1612 
   1613 	/*
   1614 	 * In addition, i82544 and later support I/O mapped indirect
   1615 	 * register access.  It is not desirable (nor supported in
   1616 	 * this driver) to use it for normal operation, though it is
   1617 	 * required to work around bugs in some chip versions.
   1618 	 */
   1619 	if (sc->sc_type >= WM_T_82544) {
   1620 		/* First we have to find the I/O BAR. */
   1621 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1622 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1623 			if (memtype == PCI_MAPREG_TYPE_IO)
   1624 				break;
   1625 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1626 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1627 				i += 4;	/* skip high bits, too */
   1628 		}
   1629 		if (i < PCI_MAPREG_END) {
   1630 			/*
   1631 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
   1632 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
   1633 			 * It's no problem because newer chips has no this
   1634 			 * bug.
   1635 			 *
   1636 			 * The i8254x doesn't apparently respond when the
   1637 			 * I/O BAR is 0, which looks somewhat like it's not
   1638 			 * been configured.
   1639 			 */
   1640 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1641 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1642 				aprint_error_dev(sc->sc_dev,
   1643 				    "WARNING: I/O BAR at zero.\n");
   1644 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1645 					0, &sc->sc_iot, &sc->sc_ioh,
   1646 					NULL, &sc->sc_ios) == 0) {
   1647 				sc->sc_flags |= WM_F_IOH_VALID;
   1648 			} else {
   1649 				aprint_error_dev(sc->sc_dev,
   1650 				    "WARNING: unable to map I/O space\n");
   1651 			}
   1652 		}
   1653 
   1654 	}
   1655 
   1656 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1657 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1658 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1659 	if (sc->sc_type < WM_T_82542_2_1)
   1660 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1661 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1662 
   1663 	/* power up chip */
   1664 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1665 	    NULL)) && error != EOPNOTSUPP) {
   1666 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1667 		return;
   1668 	}
   1669 
   1670 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1671 
   1672 	/* Allocation settings */
   1673 	max_type = PCI_INTR_TYPE_MSIX;
   1674 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1675 	counts[PCI_INTR_TYPE_MSI] = 1;
   1676 	counts[PCI_INTR_TYPE_INTX] = 1;
   1677 
   1678 alloc_retry:
   1679 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1680 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1681 		return;
   1682 	}
   1683 
   1684 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1685 		error = wm_setup_msix(sc);
   1686 		if (error) {
   1687 			pci_intr_release(pc, sc->sc_intrs,
   1688 			    counts[PCI_INTR_TYPE_MSIX]);
   1689 
   1690 			/* Setup for MSI: Disable MSI-X */
   1691 			max_type = PCI_INTR_TYPE_MSI;
   1692 			counts[PCI_INTR_TYPE_MSI] = 1;
   1693 			counts[PCI_INTR_TYPE_INTX] = 1;
   1694 			goto alloc_retry;
   1695 		}
   1696 	} else 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1697 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1698 		error = wm_setup_legacy(sc);
   1699 		if (error) {
   1700 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1701 			    counts[PCI_INTR_TYPE_MSI]);
   1702 
   1703 			/* The next try is for INTx: Disable MSI */
   1704 			max_type = PCI_INTR_TYPE_INTX;
   1705 			counts[PCI_INTR_TYPE_INTX] = 1;
   1706 			goto alloc_retry;
   1707 		}
   1708 	} else {
   1709 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1710 		error = wm_setup_legacy(sc);
   1711 		if (error) {
   1712 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1713 			    counts[PCI_INTR_TYPE_INTX]);
   1714 			return;
   1715 		}
   1716 	}
   1717 
   1718 	/*
   1719 	 * Check the function ID (unit number of the chip).
   1720 	 */
   1721 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1722 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1723 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1724 	    || (sc->sc_type == WM_T_82580)
   1725 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1726 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1727 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1728 	else
   1729 		sc->sc_funcid = 0;
   1730 
   1731 	/*
   1732 	 * Determine a few things about the bus we're connected to.
   1733 	 */
   1734 	if (sc->sc_type < WM_T_82543) {
   1735 		/* We don't really know the bus characteristics here. */
   1736 		sc->sc_bus_speed = 33;
   1737 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1738 		/*
   1739 		 * CSA (Communication Streaming Architecture) is about as fast
   1740 		 * a 32-bit 66MHz PCI Bus.
   1741 		 */
   1742 		sc->sc_flags |= WM_F_CSA;
   1743 		sc->sc_bus_speed = 66;
   1744 		aprint_verbose_dev(sc->sc_dev,
   1745 		    "Communication Streaming Architecture\n");
   1746 		if (sc->sc_type == WM_T_82547) {
   1747 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1748 			callout_setfunc(&sc->sc_txfifo_ch,
   1749 					wm_82547_txfifo_stall, sc);
   1750 			aprint_verbose_dev(sc->sc_dev,
   1751 			    "using 82547 Tx FIFO stall work-around\n");
   1752 		}
   1753 	} else if (sc->sc_type >= WM_T_82571) {
   1754 		sc->sc_flags |= WM_F_PCIE;
   1755 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1756 		    && (sc->sc_type != WM_T_ICH10)
   1757 		    && (sc->sc_type != WM_T_PCH)
   1758 		    && (sc->sc_type != WM_T_PCH2)
   1759 		    && (sc->sc_type != WM_T_PCH_LPT)
   1760 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1761 			/* ICH* and PCH* have no PCIe capability registers */
   1762 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1763 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1764 				NULL) == 0)
   1765 				aprint_error_dev(sc->sc_dev,
   1766 				    "unable to find PCIe capability\n");
   1767 		}
   1768 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1769 	} else {
   1770 		reg = CSR_READ(sc, WMREG_STATUS);
   1771 		if (reg & STATUS_BUS64)
   1772 			sc->sc_flags |= WM_F_BUS64;
   1773 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1774 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1775 
   1776 			sc->sc_flags |= WM_F_PCIX;
   1777 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1778 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1779 				aprint_error_dev(sc->sc_dev,
   1780 				    "unable to find PCIX capability\n");
   1781 			else if (sc->sc_type != WM_T_82545_3 &&
   1782 				 sc->sc_type != WM_T_82546_3) {
   1783 				/*
   1784 				 * Work around a problem caused by the BIOS
   1785 				 * setting the max memory read byte count
   1786 				 * incorrectly.
   1787 				 */
   1788 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1789 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1790 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1791 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1792 
   1793 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1794 				    PCIX_CMD_BYTECNT_SHIFT;
   1795 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1796 				    PCIX_STATUS_MAXB_SHIFT;
   1797 				if (bytecnt > maxb) {
   1798 					aprint_verbose_dev(sc->sc_dev,
   1799 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1800 					    512 << bytecnt, 512 << maxb);
   1801 					pcix_cmd = (pcix_cmd &
   1802 					    ~PCIX_CMD_BYTECNT_MASK) |
   1803 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1804 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1805 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1806 					    pcix_cmd);
   1807 				}
   1808 			}
   1809 		}
   1810 		/*
   1811 		 * The quad port adapter is special; it has a PCIX-PCIX
   1812 		 * bridge on the board, and can run the secondary bus at
   1813 		 * a higher speed.
   1814 		 */
   1815 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1816 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1817 								      : 66;
   1818 		} else if (sc->sc_flags & WM_F_PCIX) {
   1819 			switch (reg & STATUS_PCIXSPD_MASK) {
   1820 			case STATUS_PCIXSPD_50_66:
   1821 				sc->sc_bus_speed = 66;
   1822 				break;
   1823 			case STATUS_PCIXSPD_66_100:
   1824 				sc->sc_bus_speed = 100;
   1825 				break;
   1826 			case STATUS_PCIXSPD_100_133:
   1827 				sc->sc_bus_speed = 133;
   1828 				break;
   1829 			default:
   1830 				aprint_error_dev(sc->sc_dev,
   1831 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1832 				    reg & STATUS_PCIXSPD_MASK);
   1833 				sc->sc_bus_speed = 66;
   1834 				break;
   1835 			}
   1836 		} else
   1837 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1838 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1839 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1840 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1841 	}
   1842 
   1843 	/* clear interesting stat counters */
   1844 	CSR_READ(sc, WMREG_COLC);
   1845 	CSR_READ(sc, WMREG_RXERRC);
   1846 
   1847 	/* get PHY control from SMBus to PCIe */
   1848 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1849 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1850 		wm_smbustopci(sc);
   1851 
   1852 	/* Reset the chip to a known state. */
   1853 	wm_reset(sc);
   1854 
   1855 	/* Get some information about the EEPROM. */
   1856 	switch (sc->sc_type) {
   1857 	case WM_T_82542_2_0:
   1858 	case WM_T_82542_2_1:
   1859 	case WM_T_82543:
   1860 	case WM_T_82544:
   1861 		/* Microwire */
   1862 		sc->sc_nvm_wordsize = 64;
   1863 		sc->sc_nvm_addrbits = 6;
   1864 		break;
   1865 	case WM_T_82540:
   1866 	case WM_T_82545:
   1867 	case WM_T_82545_3:
   1868 	case WM_T_82546:
   1869 	case WM_T_82546_3:
   1870 		/* Microwire */
   1871 		reg = CSR_READ(sc, WMREG_EECD);
   1872 		if (reg & EECD_EE_SIZE) {
   1873 			sc->sc_nvm_wordsize = 256;
   1874 			sc->sc_nvm_addrbits = 8;
   1875 		} else {
   1876 			sc->sc_nvm_wordsize = 64;
   1877 			sc->sc_nvm_addrbits = 6;
   1878 		}
   1879 		sc->sc_flags |= WM_F_LOCK_EECD;
   1880 		break;
   1881 	case WM_T_82541:
   1882 	case WM_T_82541_2:
   1883 	case WM_T_82547:
   1884 	case WM_T_82547_2:
   1885 		sc->sc_flags |= WM_F_LOCK_EECD;
   1886 		reg = CSR_READ(sc, WMREG_EECD);
   1887 		if (reg & EECD_EE_TYPE) {
   1888 			/* SPI */
   1889 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1890 			wm_nvm_set_addrbits_size_eecd(sc);
   1891 		} else {
   1892 			/* Microwire */
   1893 			if ((reg & EECD_EE_ABITS) != 0) {
   1894 				sc->sc_nvm_wordsize = 256;
   1895 				sc->sc_nvm_addrbits = 8;
   1896 			} else {
   1897 				sc->sc_nvm_wordsize = 64;
   1898 				sc->sc_nvm_addrbits = 6;
   1899 			}
   1900 		}
   1901 		break;
   1902 	case WM_T_82571:
   1903 	case WM_T_82572:
   1904 		/* SPI */
   1905 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1906 		wm_nvm_set_addrbits_size_eecd(sc);
   1907 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1908 		break;
   1909 	case WM_T_82573:
   1910 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1911 		/* FALLTHROUGH */
   1912 	case WM_T_82574:
   1913 	case WM_T_82583:
   1914 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1915 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1916 			sc->sc_nvm_wordsize = 2048;
   1917 		} else {
   1918 			/* SPI */
   1919 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1920 			wm_nvm_set_addrbits_size_eecd(sc);
   1921 		}
   1922 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1923 		break;
   1924 	case WM_T_82575:
   1925 	case WM_T_82576:
   1926 	case WM_T_82580:
   1927 	case WM_T_I350:
   1928 	case WM_T_I354:
   1929 	case WM_T_80003:
   1930 		/* SPI */
   1931 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1932 		wm_nvm_set_addrbits_size_eecd(sc);
   1933 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1934 		    | WM_F_LOCK_SWSM;
   1935 		break;
   1936 	case WM_T_ICH8:
   1937 	case WM_T_ICH9:
   1938 	case WM_T_ICH10:
   1939 	case WM_T_PCH:
   1940 	case WM_T_PCH2:
   1941 	case WM_T_PCH_LPT:
   1942 		/* FLASH */
   1943 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1944 		sc->sc_nvm_wordsize = 2048;
   1945 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   1946 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1947 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1948 			aprint_error_dev(sc->sc_dev,
   1949 			    "can't map FLASH registers\n");
   1950 			goto out;
   1951 		}
   1952 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1953 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1954 		    ICH_FLASH_SECTOR_SIZE;
   1955 		sc->sc_ich8_flash_bank_size =
   1956 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1957 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   1958 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1959 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1960 		sc->sc_flashreg_offset = 0;
   1961 		break;
   1962 	case WM_T_PCH_SPT:
   1963 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   1964 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1965 		sc->sc_flasht = sc->sc_st;
   1966 		sc->sc_flashh = sc->sc_sh;
   1967 		sc->sc_ich8_flash_base = 0;
   1968 		sc->sc_nvm_wordsize =
   1969 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   1970 			* NVM_SIZE_MULTIPLIER;
   1971 		/* It is size in bytes, we want words */
   1972 		sc->sc_nvm_wordsize /= 2;
   1973 		/* assume 2 banks */
   1974 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   1975 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   1976 		break;
   1977 	case WM_T_I210:
   1978 	case WM_T_I211:
   1979 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1980 			wm_nvm_set_addrbits_size_eecd(sc);
   1981 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1982 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1983 		} else {
   1984 			sc->sc_nvm_wordsize = INVM_SIZE;
   1985 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1986 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1987 		}
   1988 		break;
   1989 	default:
   1990 		break;
   1991 	}
   1992 
   1993 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1994 	switch (sc->sc_type) {
   1995 	case WM_T_82571:
   1996 	case WM_T_82572:
   1997 		reg = CSR_READ(sc, WMREG_SWSM2);
   1998 		if ((reg & SWSM2_LOCK) == 0) {
   1999 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2000 			force_clear_smbi = true;
   2001 		} else
   2002 			force_clear_smbi = false;
   2003 		break;
   2004 	case WM_T_82573:
   2005 	case WM_T_82574:
   2006 	case WM_T_82583:
   2007 		force_clear_smbi = true;
   2008 		break;
   2009 	default:
   2010 		force_clear_smbi = false;
   2011 		break;
   2012 	}
   2013 	if (force_clear_smbi) {
   2014 		reg = CSR_READ(sc, WMREG_SWSM);
   2015 		if ((reg & SWSM_SMBI) != 0)
   2016 			aprint_error_dev(sc->sc_dev,
   2017 			    "Please update the Bootagent\n");
   2018 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2019 	}
   2020 
   2021 	/*
   2022 	 * Defer printing the EEPROM type until after verifying the checksum
   2023 	 * This allows the EEPROM type to be printed correctly in the case
   2024 	 * that no EEPROM is attached.
   2025 	 */
   2026 	/*
   2027 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2028 	 * this for later, so we can fail future reads from the EEPROM.
   2029 	 */
   2030 	if (wm_nvm_validate_checksum(sc)) {
   2031 		/*
   2032 		 * Read twice again because some PCI-e parts fail the
   2033 		 * first check due to the link being in sleep state.
   2034 		 */
   2035 		if (wm_nvm_validate_checksum(sc))
   2036 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2037 	}
   2038 
   2039 	/* Set device properties (macflags) */
   2040 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2041 
   2042 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2043 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2044 	else {
   2045 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2046 		    sc->sc_nvm_wordsize);
   2047 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2048 			aprint_verbose("iNVM");
   2049 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2050 			aprint_verbose("FLASH(HW)");
   2051 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2052 			aprint_verbose("FLASH");
   2053 		else {
   2054 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2055 				eetype = "SPI";
   2056 			else
   2057 				eetype = "MicroWire";
   2058 			aprint_verbose("(%d address bits) %s EEPROM",
   2059 			    sc->sc_nvm_addrbits, eetype);
   2060 		}
   2061 	}
   2062 	wm_nvm_version(sc);
   2063 	aprint_verbose("\n");
   2064 
   2065 	/* Check for I21[01] PLL workaround */
   2066 	if (sc->sc_type == WM_T_I210)
   2067 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2068 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2069 		/* NVM image release 3.25 has a workaround */
   2070 		if ((sc->sc_nvm_ver_major < 3)
   2071 		    || ((sc->sc_nvm_ver_major == 3)
   2072 			&& (sc->sc_nvm_ver_minor < 25))) {
   2073 			aprint_verbose_dev(sc->sc_dev,
   2074 			    "ROM image version %d.%d is older than 3.25\n",
   2075 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2076 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2077 		}
   2078 	}
   2079 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2080 		wm_pll_workaround_i210(sc);
   2081 
   2082 	wm_get_wakeup(sc);
   2083 	switch (sc->sc_type) {
   2084 	case WM_T_82571:
   2085 	case WM_T_82572:
   2086 	case WM_T_82573:
   2087 	case WM_T_82574:
   2088 	case WM_T_82583:
   2089 	case WM_T_80003:
   2090 	case WM_T_ICH8:
   2091 	case WM_T_ICH9:
   2092 	case WM_T_ICH10:
   2093 	case WM_T_PCH:
   2094 	case WM_T_PCH2:
   2095 	case WM_T_PCH_LPT:
   2096 	case WM_T_PCH_SPT:
   2097 		/* Non-AMT based hardware can now take control from firmware */
   2098 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2099 			wm_get_hw_control(sc);
   2100 		break;
   2101 	default:
   2102 		break;
   2103 	}
   2104 
   2105 	/*
   2106 	 * Read the Ethernet address from the EEPROM, if not first found
   2107 	 * in device properties.
   2108 	 */
   2109 	ea = prop_dictionary_get(dict, "mac-address");
   2110 	if (ea != NULL) {
   2111 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2112 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2113 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2114 	} else {
   2115 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2116 			aprint_error_dev(sc->sc_dev,
   2117 			    "unable to read Ethernet address\n");
   2118 			goto out;
   2119 		}
   2120 	}
   2121 
   2122 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2123 	    ether_sprintf(enaddr));
   2124 
   2125 	/*
   2126 	 * Read the config info from the EEPROM, and set up various
   2127 	 * bits in the control registers based on their contents.
   2128 	 */
   2129 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2130 	if (pn != NULL) {
   2131 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2132 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2133 	} else {
   2134 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2135 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2136 			goto out;
   2137 		}
   2138 	}
   2139 
   2140 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2141 	if (pn != NULL) {
   2142 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2143 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2144 	} else {
   2145 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2146 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2147 			goto out;
   2148 		}
   2149 	}
   2150 
   2151 	/* check for WM_F_WOL */
   2152 	switch (sc->sc_type) {
   2153 	case WM_T_82542_2_0:
   2154 	case WM_T_82542_2_1:
   2155 	case WM_T_82543:
   2156 		/* dummy? */
   2157 		eeprom_data = 0;
   2158 		apme_mask = NVM_CFG3_APME;
   2159 		break;
   2160 	case WM_T_82544:
   2161 		apme_mask = NVM_CFG2_82544_APM_EN;
   2162 		eeprom_data = cfg2;
   2163 		break;
   2164 	case WM_T_82546:
   2165 	case WM_T_82546_3:
   2166 	case WM_T_82571:
   2167 	case WM_T_82572:
   2168 	case WM_T_82573:
   2169 	case WM_T_82574:
   2170 	case WM_T_82583:
   2171 	case WM_T_80003:
   2172 	default:
   2173 		apme_mask = NVM_CFG3_APME;
   2174 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2175 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2176 		break;
   2177 	case WM_T_82575:
   2178 	case WM_T_82576:
   2179 	case WM_T_82580:
   2180 	case WM_T_I350:
   2181 	case WM_T_I354: /* XXX ok? */
   2182 	case WM_T_ICH8:
   2183 	case WM_T_ICH9:
   2184 	case WM_T_ICH10:
   2185 	case WM_T_PCH:
   2186 	case WM_T_PCH2:
   2187 	case WM_T_PCH_LPT:
   2188 	case WM_T_PCH_SPT:
   2189 		/* XXX The funcid should be checked on some devices */
   2190 		apme_mask = WUC_APME;
   2191 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2192 		break;
   2193 	}
   2194 
   2195 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2196 	if ((eeprom_data & apme_mask) != 0)
   2197 		sc->sc_flags |= WM_F_WOL;
   2198 #ifdef WM_DEBUG
   2199 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2200 		printf("WOL\n");
   2201 #endif
   2202 
   2203 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2204 		/* Check NVM for autonegotiation */
   2205 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2206 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2207 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2208 		}
   2209 	}
   2210 
   2211 	/*
   2212 	 * XXX need special handling for some multiple port cards
   2213 	 * to disable a paticular port.
   2214 	 */
   2215 
   2216 	if (sc->sc_type >= WM_T_82544) {
   2217 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2218 		if (pn != NULL) {
   2219 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2220 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2221 		} else {
   2222 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2223 				aprint_error_dev(sc->sc_dev,
   2224 				    "unable to read SWDPIN\n");
   2225 				goto out;
   2226 			}
   2227 		}
   2228 	}
   2229 
   2230 	if (cfg1 & NVM_CFG1_ILOS)
   2231 		sc->sc_ctrl |= CTRL_ILOS;
   2232 
   2233 	/*
   2234 	 * XXX
   2235 	 * This code isn't correct because pin 2 and 3 are located
   2236 	 * in different position on newer chips. Check all datasheet.
   2237 	 *
   2238 	 * Until resolve this problem, check if a chip < 82580
   2239 	 */
   2240 	if (sc->sc_type <= WM_T_82580) {
   2241 		if (sc->sc_type >= WM_T_82544) {
   2242 			sc->sc_ctrl |=
   2243 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2244 			    CTRL_SWDPIO_SHIFT;
   2245 			sc->sc_ctrl |=
   2246 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2247 			    CTRL_SWDPINS_SHIFT;
   2248 		} else {
   2249 			sc->sc_ctrl |=
   2250 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2251 			    CTRL_SWDPIO_SHIFT;
   2252 		}
   2253 	}
   2254 
   2255 	/* XXX For other than 82580? */
   2256 	if (sc->sc_type == WM_T_82580) {
   2257 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2258 		if (nvmword & __BIT(13))
   2259 			sc->sc_ctrl |= CTRL_ILOS;
   2260 	}
   2261 
   2262 #if 0
   2263 	if (sc->sc_type >= WM_T_82544) {
   2264 		if (cfg1 & NVM_CFG1_IPS0)
   2265 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2266 		if (cfg1 & NVM_CFG1_IPS1)
   2267 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2268 		sc->sc_ctrl_ext |=
   2269 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2270 		    CTRL_EXT_SWDPIO_SHIFT;
   2271 		sc->sc_ctrl_ext |=
   2272 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2273 		    CTRL_EXT_SWDPINS_SHIFT;
   2274 	} else {
   2275 		sc->sc_ctrl_ext |=
   2276 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2277 		    CTRL_EXT_SWDPIO_SHIFT;
   2278 	}
   2279 #endif
   2280 
   2281 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2282 #if 0
   2283 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2284 #endif
   2285 
   2286 	if (sc->sc_type == WM_T_PCH) {
   2287 		uint16_t val;
   2288 
   2289 		/* Save the NVM K1 bit setting */
   2290 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2291 
   2292 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2293 			sc->sc_nvm_k1_enabled = 1;
   2294 		else
   2295 			sc->sc_nvm_k1_enabled = 0;
   2296 	}
   2297 
   2298 	/*
   2299 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
   2300 	 * media structures accordingly.
   2301 	 */
   2302 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2303 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2304 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2305 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2306 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2307 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2308 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2309 	} else if (sc->sc_type < WM_T_82543 ||
   2310 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2311 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2312 			aprint_error_dev(sc->sc_dev,
   2313 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2314 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2315 		}
   2316 		wm_tbi_mediainit(sc);
   2317 	} else {
   2318 		switch (sc->sc_type) {
   2319 		case WM_T_82575:
   2320 		case WM_T_82576:
   2321 		case WM_T_82580:
   2322 		case WM_T_I350:
   2323 		case WM_T_I354:
   2324 		case WM_T_I210:
   2325 		case WM_T_I211:
   2326 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2327 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2328 			switch (link_mode) {
   2329 			case CTRL_EXT_LINK_MODE_1000KX:
   2330 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2331 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2332 				break;
   2333 			case CTRL_EXT_LINK_MODE_SGMII:
   2334 				if (wm_sgmii_uses_mdio(sc)) {
   2335 					aprint_verbose_dev(sc->sc_dev,
   2336 					    "SGMII(MDIO)\n");
   2337 					sc->sc_flags |= WM_F_SGMII;
   2338 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2339 					break;
   2340 				}
   2341 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2342 				/*FALLTHROUGH*/
   2343 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2344 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2345 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2346 					if (link_mode
   2347 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2348 						sc->sc_mediatype
   2349 						    = WM_MEDIATYPE_COPPER;
   2350 						sc->sc_flags |= WM_F_SGMII;
   2351 					} else {
   2352 						sc->sc_mediatype
   2353 						    = WM_MEDIATYPE_SERDES;
   2354 						aprint_verbose_dev(sc->sc_dev,
   2355 						    "SERDES\n");
   2356 					}
   2357 					break;
   2358 				}
   2359 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2360 					aprint_verbose_dev(sc->sc_dev,
   2361 					    "SERDES\n");
   2362 
   2363 				/* Change current link mode setting */
   2364 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2365 				switch (sc->sc_mediatype) {
   2366 				case WM_MEDIATYPE_COPPER:
   2367 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2368 					break;
   2369 				case WM_MEDIATYPE_SERDES:
   2370 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2371 					break;
   2372 				default:
   2373 					break;
   2374 				}
   2375 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2376 				break;
   2377 			case CTRL_EXT_LINK_MODE_GMII:
   2378 			default:
   2379 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2380 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2381 				break;
   2382 			}
   2383 
   2384 			reg &= ~CTRL_EXT_I2C_ENA;
   2385 			if ((sc->sc_flags & WM_F_SGMII) != 0)
   2386 				reg |= CTRL_EXT_I2C_ENA;
   2387 			else
   2388 				reg &= ~CTRL_EXT_I2C_ENA;
   2389 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2390 
   2391 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2392 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2393 			else
   2394 				wm_tbi_mediainit(sc);
   2395 			break;
   2396 		default:
   2397 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2398 				aprint_error_dev(sc->sc_dev,
   2399 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2400 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2401 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2402 		}
   2403 	}
   2404 
   2405 	ifp = &sc->sc_ethercom.ec_if;
   2406 	xname = device_xname(sc->sc_dev);
   2407 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2408 	ifp->if_softc = sc;
   2409 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2410 	ifp->if_ioctl = wm_ioctl;
   2411 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2412 		ifp->if_start = wm_nq_start;
   2413 		if (sc->sc_nqueues > 1)
   2414 			ifp->if_transmit = wm_nq_transmit;
   2415 	} else
   2416 		ifp->if_start = wm_start;
   2417 	ifp->if_watchdog = wm_watchdog;
   2418 	ifp->if_init = wm_init;
   2419 	ifp->if_stop = wm_stop;
   2420 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2421 	IFQ_SET_READY(&ifp->if_snd);
   2422 
   2423 	/* Check for jumbo frame */
   2424 	switch (sc->sc_type) {
   2425 	case WM_T_82573:
   2426 		/* XXX limited to 9234 if ASPM is disabled */
   2427 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2428 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2429 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2430 		break;
   2431 	case WM_T_82571:
   2432 	case WM_T_82572:
   2433 	case WM_T_82574:
   2434 	case WM_T_82575:
   2435 	case WM_T_82576:
   2436 	case WM_T_82580:
   2437 	case WM_T_I350:
   2438 	case WM_T_I354: /* XXXX ok? */
   2439 	case WM_T_I210:
   2440 	case WM_T_I211:
   2441 	case WM_T_80003:
   2442 	case WM_T_ICH9:
   2443 	case WM_T_ICH10:
   2444 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2445 	case WM_T_PCH_LPT:
   2446 	case WM_T_PCH_SPT:
   2447 		/* XXX limited to 9234 */
   2448 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2449 		break;
   2450 	case WM_T_PCH:
   2451 		/* XXX limited to 4096 */
   2452 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2453 		break;
   2454 	case WM_T_82542_2_0:
   2455 	case WM_T_82542_2_1:
   2456 	case WM_T_82583:
   2457 	case WM_T_ICH8:
   2458 		/* No support for jumbo frame */
   2459 		break;
   2460 	default:
   2461 		/* ETHER_MAX_LEN_JUMBO */
   2462 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2463 		break;
   2464 	}
   2465 
   2466 	/* If we're a i82543 or greater, we can support VLANs. */
   2467 	if (sc->sc_type >= WM_T_82543)
   2468 		sc->sc_ethercom.ec_capabilities |=
   2469 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2470 
   2471 	/*
   2472 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   2473 	 * on i82543 and later.
   2474 	 */
   2475 	if (sc->sc_type >= WM_T_82543) {
   2476 		ifp->if_capabilities |=
   2477 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2478 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2479 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2480 		    IFCAP_CSUM_TCPv6_Tx |
   2481 		    IFCAP_CSUM_UDPv6_Tx;
   2482 	}
   2483 
   2484 	/*
   2485 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2486 	 *
   2487 	 *	82541GI (8086:1076) ... no
   2488 	 *	82572EI (8086:10b9) ... yes
   2489 	 */
   2490 	if (sc->sc_type >= WM_T_82571) {
   2491 		ifp->if_capabilities |=
   2492 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2493 	}
   2494 
   2495 	/*
   2496 	 * If we're a i82544 or greater (except i82547), we can do
   2497 	 * TCP segmentation offload.
   2498 	 */
   2499 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2500 		ifp->if_capabilities |= IFCAP_TSOv4;
   2501 	}
   2502 
   2503 	if (sc->sc_type >= WM_T_82571) {
   2504 		ifp->if_capabilities |= IFCAP_TSOv6;
   2505 	}
   2506 
   2507 #ifdef WM_MPSAFE
   2508 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2509 #else
   2510 	sc->sc_core_lock = NULL;
   2511 #endif
   2512 
   2513 	/* Attach the interface. */
   2514 	if_initialize(ifp);
   2515 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2516 	ether_ifattach(ifp, enaddr);
   2517 	if_register(ifp);
   2518 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2519 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2520 			  RND_FLAG_DEFAULT);
   2521 
   2522 #ifdef WM_EVENT_COUNTERS
   2523 	/* Attach event counters. */
   2524 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2525 	    NULL, xname, "txsstall");
   2526 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2527 	    NULL, xname, "txdstall");
   2528 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2529 	    NULL, xname, "txfifo_stall");
   2530 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2531 	    NULL, xname, "txdw");
   2532 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2533 	    NULL, xname, "txqe");
   2534 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2535 	    NULL, xname, "rxintr");
   2536 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2537 	    NULL, xname, "linkintr");
   2538 
   2539 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2540 	    NULL, xname, "rxipsum");
   2541 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2542 	    NULL, xname, "rxtusum");
   2543 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2544 	    NULL, xname, "txipsum");
   2545 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2546 	    NULL, xname, "txtusum");
   2547 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2548 	    NULL, xname, "txtusum6");
   2549 
   2550 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2551 	    NULL, xname, "txtso");
   2552 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2553 	    NULL, xname, "txtso6");
   2554 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2555 	    NULL, xname, "txtsopain");
   2556 
   2557 	for (i = 0; i < WM_NTXSEGS; i++) {
   2558 		snprintf(wm_txseg_evcnt_names[i],
   2559 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2560 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2561 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2562 	}
   2563 
   2564 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2565 	    NULL, xname, "txdrop");
   2566 
   2567 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2568 	    NULL, xname, "tu");
   2569 
   2570 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2571 	    NULL, xname, "tx_xoff");
   2572 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2573 	    NULL, xname, "tx_xon");
   2574 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2575 	    NULL, xname, "rx_xoff");
   2576 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2577 	    NULL, xname, "rx_xon");
   2578 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2579 	    NULL, xname, "rx_macctl");
   2580 #endif /* WM_EVENT_COUNTERS */
   2581 
   2582 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2583 		pmf_class_network_register(self, ifp);
   2584 	else
   2585 		aprint_error_dev(self, "couldn't establish power handler\n");
   2586 
   2587 	sc->sc_flags |= WM_F_ATTACHED;
   2588  out:
   2589 	return;
   2590 }
   2591 
/*
 * wm_detach:		[driver detach function] (ca_detach)
 *
 *	Reverse everything wm_attach() did: stop the interface, detach
 *	from pmf(9), hand the hardware back to the firmware, tear down
 *	MII/media, detach the network interface, release DMA resources
 *	and interrupts, and unmap the register windows.
 *
 *	Returns 0 on success (including the trivial case where attach
 *	never completed).
 */
static int
wm_detach(device_t self, int flags __unused)
{
	struct wm_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i;

	/* If attach bailed out early, there is nothing to undo. */
	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	wm_stop(ifp, 1);

	pmf_device_deregister(self);

	/* Tell the firmware about the release */
	WM_CORE_LOCK(sc);
	wm_release_manageability(sc);
	wm_release_hw_control(sc);
	WM_CORE_UNLOCK(sc);

	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	/* Detach from the network stack before freeing queue resources. */
	ether_ifdetach(ifp);
	if_detach(ifp);
	if_percpuq_destroy(sc->sc_ipq);

	/* Unload RX dmamaps and free mbufs */
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
		mutex_enter(rxq->rxq_lock);
		wm_rxdrain(rxq);
		mutex_exit(rxq->rxq_lock);
	}
	/* Must unlock here */

	/* Disestablish the interrupt handler */
	for (i = 0; i < sc->sc_nintrs; i++) {
		if (sc->sc_ihs[i] != NULL) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
			sc->sc_ihs[i] = NULL;
		}
	}
	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);

	/* Free TX/RX descriptor rings and software state. */
	wm_free_txrx_queues(sc);

	/* Unmap the registers */
	if (sc->sc_ss) {
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
		sc->sc_ss = 0;
	}
	if (sc->sc_ios) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		sc->sc_ios = 0;
	}
	if (sc->sc_flashs) {
		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
		sc->sc_flashs = 0;
	}

	/* sc_core_lock is only allocated under WM_MPSAFE; NULL otherwise. */
	if (sc->sc_core_lock)
		mutex_obj_free(sc->sc_core_lock);

	return 0;
}
   2662 
   2663 static bool
   2664 wm_suspend(device_t self, const pmf_qual_t *qual)
   2665 {
   2666 	struct wm_softc *sc = device_private(self);
   2667 
   2668 	wm_release_manageability(sc);
   2669 	wm_release_hw_control(sc);
   2670 #ifdef WM_WOL
   2671 	wm_enable_wakeup(sc);
   2672 #endif
   2673 
   2674 	return true;
   2675 }
   2676 
   2677 static bool
   2678 wm_resume(device_t self, const pmf_qual_t *qual)
   2679 {
   2680 	struct wm_softc *sc = device_private(self);
   2681 
   2682 	wm_init_manageability(sc);
   2683 
   2684 	return true;
   2685 }
   2686 
   2687 /*
   2688  * wm_watchdog:		[ifnet interface function]
   2689  *
   2690  *	Watchdog timer handler.
   2691  */
   2692 static void
   2693 wm_watchdog(struct ifnet *ifp)
   2694 {
   2695 	int qid;
   2696 	struct wm_softc *sc = ifp->if_softc;
   2697 
   2698 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2699 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2700 
   2701 		wm_watchdog_txq(ifp, txq);
   2702 	}
   2703 
   2704 	/* Reset the interface. */
   2705 	(void) wm_init(ifp);
   2706 
   2707 	/*
   2708 	 * There are still some upper layer processing which call
   2709 	 * ifp->if_start(). e.g. ALTQ
   2710 	 */
   2711 	/* Try to get more packets going. */
   2712 	ifp->if_start(ifp);
   2713 }
   2714 
/*
 * wm_watchdog_txq:
 *
 *	Per-queue half of the watchdog: reclaim any completed transmit
 *	descriptors first, and only if descriptors are still outstanding
 *	afterwards report a device timeout (and, under WM_DEBUG, dump
 *	the pending descriptors).
 */
static void
wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	mutex_enter(txq->txq_lock);
	wm_txeof(sc, txq);
	mutex_exit(txq->txq_lock);

	/* All descriptors free means the sweep drained the queue: no error. */
	if (txq->txq_free != WM_NTXDESC(txq)) {
#ifdef WM_DEBUG
		int i, j;
		struct wm_txsoft *txs;
#endif
		log(LOG_ERR,
		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
		    txq->txq_next);
		ifp->if_oerrors++;
#ifdef WM_DEBUG
		/* Walk the outstanding software jobs and their descriptors. */
		for (i = txq->txq_sdirty; i != txq->txq_snext ;
		    i = WM_NEXTTXS(txq, i)) {
		    txs = &txq->txq_soft[i];
		    printf("txs %d tx %d -> %d\n",
			i, txs->txs_firstdesc, txs->txs_lastdesc);
		    for (j = txs->txs_firstdesc; ;
			j = WM_NEXTTX(txq, j)) {
			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
			printf("\t %#08x%08x\n",
			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
			if (j == txs->txs_lastdesc)
				break;
			}
		}
#endif
	}
}
   2758 
/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.  Accumulates hardware statistics
 *	counters into the ifnet counters and re-arms itself.
 */
static void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#ifndef WM_MPSAFE
	int s = splnet();
#endif

	WM_CORE_LOCK(sc);

	/* Don't touch the hardware if the interface is being stopped. */
	if (sc->sc_stopping)
		goto out;

	/* Flow-control statistics (82542 rev 2.1 and newer only). */
	if (sc->sc_type >= WM_T_82542_2_1) {
		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
	}

	/* Fold the hardware error counters into the ifnet statistics. */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += 0ULL + /* ensure quad_t */
	    + CSR_READ(sc, WMREG_CRCERRS)
	    + CSR_READ(sc, WMREG_ALGNERRC)
	    + CSR_READ(sc, WMREG_SYMERRC)
	    + CSR_READ(sc, WMREG_RXERRC)
	    + CSR_READ(sc, WMREG_SEC)
	    + CSR_READ(sc, WMREG_CEXTERR)
	    + CSR_READ(sc, WMREG_RLEC);
	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);

	/* Poll link state via MII, SERDES or TBI depending on media. */
	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else if ((sc->sc_type >= WM_T_82575)
	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
		wm_serdes_tick(sc);
	else
		wm_tbi_tick(sc);

out:
	WM_CORE_UNLOCK(sc);
#ifndef WM_MPSAFE
	splx(s);
#endif

	/*
	 * Re-arm the callout outside the lock.  NOTE(review): sc_stopping
	 * is read here without the core lock — presumably a benign race
	 * with wm_stop(); confirm.
	 */
	if (!sc->sc_stopping)
		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}
   2815 
   2816 static int
   2817 wm_ifflags_cb(struct ethercom *ec)
   2818 {
   2819 	struct ifnet *ifp = &ec->ec_if;
   2820 	struct wm_softc *sc = ifp->if_softc;
   2821 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2822 	int rc = 0;
   2823 
   2824 	WM_CORE_LOCK(sc);
   2825 
   2826 	if (change != 0)
   2827 		sc->sc_if_flags = ifp->if_flags;
   2828 
   2829 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2830 		rc = ENETRESET;
   2831 		goto out;
   2832 	}
   2833 
   2834 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2835 		wm_set_filter(sc);
   2836 
   2837 	wm_set_vlan(sc);
   2838 
   2839 out:
   2840 	WM_CORE_UNLOCK(sc);
   2841 
   2842 	return rc;
   2843 }
   2844 
/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 *
 *	Locking: without WM_MPSAFE the whole routine runs at splnet();
 *	with WM_MPSAFE the core lock covers softc updates and splnet()
 *	is raised only around the non-MP-safe helper calls.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct sockaddr_dl *sdl;
	int s, error;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
#ifndef WM_MPSAFE
	s = splnet();
#endif
	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		WM_CORE_LOCK(sc);
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			/* Remember the requested flow-control flags. */
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		WM_CORE_UNLOCK(sc);
#ifdef WM_MPSAFE
		s = splnet();
#endif
		/* ifmedia_ioctl() is called without the core lock held. */
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
#ifdef WM_MPSAFE
		splx(s);
#endif
		break;
	case SIOCINITIFADDR:
		WM_CORE_LOCK(sc);
		if (ifa->ifa_addr->sa_family == AF_LINK) {
			/* Copy the new link-level address into if_dl. */
			sdl = satosdl(ifp->if_dl->ifa_addr);
			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
			/* unicast address is first multicast entry */
			wm_set_filter(sc);
			error = 0;
			WM_CORE_UNLOCK(sc);
			break;
		}
		WM_CORE_UNLOCK(sc);
		/*FALLTHROUGH*/
	default:
#ifdef WM_MPSAFE
		s = splnet();
#endif
		/* It may call wm_start, so unlock here */
		error = ether_ioctl(ifp, cmd, data);
#ifdef WM_MPSAFE
		splx(s);
#endif
		if (error != ENETRESET)
			break;

		/* ENETRESET: apply the new configuration ourselves. */
		error = 0;

		if (cmd == SIOCSIFCAP) {
			/* Capability change needs a full re-init. */
			error = (*ifp->if_init)(ifp);
		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			WM_CORE_LOCK(sc);
			wm_set_filter(sc);
			WM_CORE_UNLOCK(sc);
		}
		break;
	}

#ifndef WM_MPSAFE
	splx(s);
#endif
	return error;
}
   2938 
   2939 /* MAC address related */
   2940 
   2941 /*
   2942  * Get the offset of MAC address and return it.
   2943  * If error occured, use offset 0.
   2944  */
   2945 static uint16_t
   2946 wm_check_alt_mac_addr(struct wm_softc *sc)
   2947 {
   2948 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2949 	uint16_t offset = NVM_OFF_MACADDR;
   2950 
   2951 	/* Try to read alternative MAC address pointer */
   2952 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2953 		return 0;
   2954 
   2955 	/* Check pointer if it's valid or not. */
   2956 	if ((offset == 0x0000) || (offset == 0xffff))
   2957 		return 0;
   2958 
   2959 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2960 	/*
   2961 	 * Check whether alternative MAC address is valid or not.
   2962 	 * Some cards have non 0xffff pointer but those don't use
   2963 	 * alternative MAC address in reality.
   2964 	 *
   2965 	 * Check whether the broadcast bit is set or not.
   2966 	 */
   2967 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2968 		if (((myea[0] & 0xff) & 0x01) == 0)
   2969 			return offset; /* Found */
   2970 
   2971 	/* Not found */
   2972 	return 0;
   2973 }
   2974 
   2975 static int
   2976 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2977 {
   2978 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2979 	uint16_t offset = NVM_OFF_MACADDR;
   2980 	int do_invert = 0;
   2981 
   2982 	switch (sc->sc_type) {
   2983 	case WM_T_82580:
   2984 	case WM_T_I350:
   2985 	case WM_T_I354:
   2986 		/* EEPROM Top Level Partitioning */
   2987 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2988 		break;
   2989 	case WM_T_82571:
   2990 	case WM_T_82575:
   2991 	case WM_T_82576:
   2992 	case WM_T_80003:
   2993 	case WM_T_I210:
   2994 	case WM_T_I211:
   2995 		offset = wm_check_alt_mac_addr(sc);
   2996 		if (offset == 0)
   2997 			if ((sc->sc_funcid & 0x01) == 1)
   2998 				do_invert = 1;
   2999 		break;
   3000 	default:
   3001 		if ((sc->sc_funcid & 0x01) == 1)
   3002 			do_invert = 1;
   3003 		break;
   3004 	}
   3005 
   3006 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   3007 		myea) != 0)
   3008 		goto bad;
   3009 
   3010 	enaddr[0] = myea[0] & 0xff;
   3011 	enaddr[1] = myea[0] >> 8;
   3012 	enaddr[2] = myea[1] & 0xff;
   3013 	enaddr[3] = myea[1] >> 8;
   3014 	enaddr[4] = myea[2] & 0xff;
   3015 	enaddr[5] = myea[2] >> 8;
   3016 
   3017 	/*
   3018 	 * Toggle the LSB of the MAC address on the second port
   3019 	 * of some dual port cards.
   3020 	 */
   3021 	if (do_invert != 0)
   3022 		enaddr[5] ^= 1;
   3023 
   3024 	return 0;
   3025 
   3026  bad:
   3027 	return -1;
   3028 }
   3029 
   3030 /*
   3031  * wm_set_ral:
   3032  *
   3033  *	Set an entery in the receive address list.
   3034  */
   3035 static void
   3036 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3037 {
   3038 	uint32_t ral_lo, ral_hi;
   3039 
   3040 	if (enaddr != NULL) {
   3041 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3042 		    (enaddr[3] << 24);
   3043 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3044 		ral_hi |= RAL_AV;
   3045 	} else {
   3046 		ral_lo = 0;
   3047 		ral_hi = 0;
   3048 	}
   3049 
   3050 	if (sc->sc_type >= WM_T_82544) {
   3051 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3052 		    ral_lo);
   3053 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3054 		    ral_hi);
   3055 	} else {
   3056 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3057 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3058 	}
   3059 }
   3060 
   3061 /*
   3062  * wm_mchash:
   3063  *
   3064  *	Compute the hash of the multicast address for the 4096-bit
   3065  *	multicast filter.
   3066  */
   3067 static uint32_t
   3068 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3069 {
   3070 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3071 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3072 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3073 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3074 	uint32_t hash;
   3075 
   3076 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3077 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3078 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3079 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3080 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3081 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3082 		return (hash & 0x3ff);
   3083 	}
   3084 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3085 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3086 
   3087 	return (hash & 0xfff);
   3088 }
   3089 
/*
 * wm_set_filter:
 *
 *	Set up the receive filter: broadcast/promiscuous bits in RCTL,
 *	the station address in the receive address list (RAL), and the
 *	multicast hash table (MTA).
 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size, ralmax;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	/* The MTA lives at a different offset on >= 82544. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	/* Start from a clean broadcast/unicast/multicast-promisc state. */
	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.  The RAL size varies by chip.
	 */
	if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 -1;
	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
	    || (sc->sc_type == WM_T_PCH))
		size = WM_RAL_TABSIZE_ICH8;
	else if (sc->sc_type == WM_T_PCH2)
		size = WM_RAL_TABSIZE_PCH2;
	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
		size = WM_RAL_TABSIZE_PCH_LPT;
	else if (sc->sc_type == WM_T_82575)
		size = WM_RAL_TABSIZE_82575;
	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
		size = WM_RAL_TABSIZE_82576;
	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		size = WM_RAL_TABSIZE_I350;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);

	/*
	 * On PCH_LPT/PCH_SPT, FWSM_WLOCK_MAC limits how many RAL
	 * entries are writable by the host.  NOTE(review): presumably
	 * the remaining entries are owned by management firmware —
	 * confirm against the documentation.
	 */
	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
		switch (i) {
		case 0:
			/* We can use all entries */
			ralmax = size;
			break;
		case 1:
			/* Only RAR[0] */
			ralmax = 1;
			break;
		default:
			/* available SHRA + RAR[0] */
			ralmax = i + 1;
		}
	} else
		ralmax = size;
	/* Clear only the entries we are allowed to write. */
	for (i = 1; i < size; i++) {
		if (i < ralmax)
			wm_set_ral(sc, NULL, i);
	}

	/* ICH/PCH parts have a smaller multicast table. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
	    || (sc->sc_type == WM_T_PCH_SPT))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Upper bits select the 32-bit MTA word, low 5 bits the bit. */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		    || (sc->sc_type == WM_T_PCH2)
		    || (sc->sc_type == WM_T_PCH_LPT)
		    || (sc->sc_type == WM_T_PCH_SPT))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
			/*
			 * 82544 Errata 9: Certain register cannot be written
			 * with particular alignments in PCI-X bus operation
			 * (FCAH, MTA and VFTA).
			 */
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
   3233 
   3234 /* Reset and init related */
   3235 
   3236 static void
   3237 wm_set_vlan(struct wm_softc *sc)
   3238 {
   3239 
   3240 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3241 		device_xname(sc->sc_dev), __func__));
   3242 	/* Deal with VLAN enables. */
   3243 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3244 		sc->sc_ctrl |= CTRL_VME;
   3245 	else
   3246 		sc->sc_ctrl &= ~CTRL_VME;
   3247 
   3248 	/* Write the control registers. */
   3249 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3250 }
   3251 
   3252 static void
   3253 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3254 {
   3255 	uint32_t gcr;
   3256 	pcireg_t ctrl2;
   3257 
   3258 	gcr = CSR_READ(sc, WMREG_GCR);
   3259 
   3260 	/* Only take action if timeout value is defaulted to 0 */
   3261 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3262 		goto out;
   3263 
   3264 	if ((gcr & GCR_CAP_VER2) == 0) {
   3265 		gcr |= GCR_CMPL_TMOUT_10MS;
   3266 		goto out;
   3267 	}
   3268 
   3269 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3270 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3271 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3272 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3273 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3274 
   3275 out:
   3276 	/* Disable completion timeout resend */
   3277 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3278 
   3279 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3280 }
   3281 
   3282 void
   3283 wm_get_auto_rd_done(struct wm_softc *sc)
   3284 {
   3285 	int i;
   3286 
   3287 	/* wait for eeprom to reload */
   3288 	switch (sc->sc_type) {
   3289 	case WM_T_82571:
   3290 	case WM_T_82572:
   3291 	case WM_T_82573:
   3292 	case WM_T_82574:
   3293 	case WM_T_82583:
   3294 	case WM_T_82575:
   3295 	case WM_T_82576:
   3296 	case WM_T_82580:
   3297 	case WM_T_I350:
   3298 	case WM_T_I354:
   3299 	case WM_T_I210:
   3300 	case WM_T_I211:
   3301 	case WM_T_80003:
   3302 	case WM_T_ICH8:
   3303 	case WM_T_ICH9:
   3304 		for (i = 0; i < 10; i++) {
   3305 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3306 				break;
   3307 			delay(1000);
   3308 		}
   3309 		if (i == 10) {
   3310 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3311 			    "complete\n", device_xname(sc->sc_dev));
   3312 		}
   3313 		break;
   3314 	default:
   3315 		break;
   3316 	}
   3317 }
   3318 
   3319 void
   3320 wm_lan_init_done(struct wm_softc *sc)
   3321 {
   3322 	uint32_t reg = 0;
   3323 	int i;
   3324 
   3325 	/* wait for eeprom to reload */
   3326 	switch (sc->sc_type) {
   3327 	case WM_T_ICH10:
   3328 	case WM_T_PCH:
   3329 	case WM_T_PCH2:
   3330 	case WM_T_PCH_LPT:
   3331 	case WM_T_PCH_SPT:
   3332 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3333 			reg = CSR_READ(sc, WMREG_STATUS);
   3334 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3335 				break;
   3336 			delay(100);
   3337 		}
   3338 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3339 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3340 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3341 		}
   3342 		break;
   3343 	default:
   3344 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3345 		    __func__);
   3346 		break;
   3347 	}
   3348 
   3349 	reg &= ~STATUS_LAN_INIT_DONE;
   3350 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3351 }
   3352 
/*
 * wm_get_cfg_done:
 *
 *	Wait until the chip has finished (re)loading its configuration
 *	after a reset.  The completion mechanism differs per chip family:
 *	a fixed delay, polling EEMNGCTL, or delegating to the LAN-init /
 *	auto-read-done helpers.
 */
void
wm_get_cfg_done(struct wm_softc *sc)
{
	int mask;
	uint32_t reg;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* generic: a fixed 10ms wait suffices for these chips */
		delay(10*1000);
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
	case WM_T_I210:
	case WM_T_I211:
		if (sc->sc_type == WM_T_82571) {
			/* Only 82571 shares port 0 */
			mask = EEMNGCTL_CFGDONE_0;
		} else
			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
		/* Poll this function's CFGDONE bit in EEMNGCTL. */
		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
				break;
			delay(1000);
		}
		if (i >= WM_PHY_CFG_TIMEOUT) {
			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
				device_xname(sc->sc_dev), __func__));
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		delay(10*1000);
		if (sc->sc_type >= WM_T_ICH10)
			wm_lan_init_done(sc);
		else
			wm_get_auto_rd_done(sc);

		/* Clear STATUS_PHYRA if it is set. */
		reg = CSR_READ(sc, WMREG_STATUS);
		if ((reg & STATUS_PHYRA) != 0)
			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}
}
   3431 
/*
 * Init hardware bits: apply per-chip register tweaks and errata
 * workarounds (TXDCTL, TARC0/1, CTRL_EXT, GCR, RFCTL, ...) for the
 * 82571 variants, 80003 and the ICH/PCH families.
 */
void
wm_initialize_hardware_bits(struct wm_softc *sc)
{
	uint32_t tarc0, tarc1, reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	/* For 82571 variant, 80003 and ICHs */
	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
	    || (sc->sc_type >= WM_T_80003)) {

		/* Transmit Descriptor Control 0 */
		reg = CSR_READ(sc, WMREG_TXDCTL(0));
		reg |= TXDCTL_COUNT_DESC;
		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);

		/* Transmit Descriptor Control 1 */
		reg = CSR_READ(sc, WMREG_TXDCTL(1));
		reg |= TXDCTL_COUNT_DESC;
		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);

		/* TARC0 */
		tarc0 = CSR_READ(sc, WMREG_TARC0);
		switch (sc->sc_type) {
		case WM_T_82571:
		case WM_T_82572:
		case WM_T_82573:
		case WM_T_82574:
		case WM_T_82583:
		case WM_T_80003:
			/* Clear bits 30..27 */
			tarc0 &= ~__BITS(30, 27);
			break;
		default:
			break;
		}

		/* Per-family TARC/CTRL_EXT/GCR adjustments.  tarc0 is
		 * accumulated here and written back once after the switch. */
		switch (sc->sc_type) {
		case WM_T_82571:
		case WM_T_82572:
			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */

			tarc1 = CSR_READ(sc, WMREG_TARC1);
			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bits 22 */

			/* TARC1 bit 28: clear iff multiple Tx requests (MULR)
			 * is enabled in TCTL */
			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
				tarc1 &= ~__BIT(28);
			else
				tarc1 |= __BIT(28);
			CSR_WRITE(sc, WMREG_TARC1, tarc1);

			/*
			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
			 */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_DMA_DYN_CLK;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
			break;
		case WM_T_82573:
		case WM_T_82574:
		case WM_T_82583:
			if ((sc->sc_type == WM_T_82574)
			    || (sc->sc_type == WM_T_82583))
				tarc0 |= __BIT(26); /* TARC0 bit 26 */

			/* Extended Device Control */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~__BIT(23);	/* Clear bit 23 */
			reg |= __BIT(22);	/* Set bit 22 */
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

			/* Device Control */
			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

			/* PCIe Control Register */
			/*
			 * 82573 Errata (unknown).
			 *
			 * 82574 Errata 25 and 82583 Errata 12
			 * "Dropped Rx Packets":
			 *   NVM Image Version 2.1.4 and newer has no this bug.
			 */
			reg = CSR_READ(sc, WMREG_GCR);
			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
			CSR_WRITE(sc, WMREG_GCR, reg);

			if ((sc->sc_type == WM_T_82574)
			    || (sc->sc_type == WM_T_82583)) {
				/*
				 * Document says this bit must be set for
				 * proper operation.
				 */
				reg = CSR_READ(sc, WMREG_GCR);
				reg |= __BIT(22);
				CSR_WRITE(sc, WMREG_GCR, reg);

				/*
				 * Apply workaround for hardware errata
				 * documented in errata docs Fixes issue where
				 * some error prone or unreliable PCIe
				 * completions are occurring, particularly
				 * with ASPM enabled. Without fix, issue can
				 * cause Tx timeouts.
				 */
				reg = CSR_READ(sc, WMREG_GCR2);
				reg |= __BIT(0);
				CSR_WRITE(sc, WMREG_GCR2, reg);
			}
			break;
		case WM_T_80003:
			/* TARC0 */
			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bits 20 */

			/* TARC1 bit 28: same MULR-dependent setting as above */
			tarc1 = CSR_READ(sc, WMREG_TARC1);
			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
				tarc1 &= ~__BIT(28);
			else
				tarc1 |= __BIT(28);
			CSR_WRITE(sc, WMREG_TARC1, tarc1);
			break;
		case WM_T_ICH8:
		case WM_T_ICH9:
		case WM_T_ICH10:
		case WM_T_PCH:
		case WM_T_PCH2:
		case WM_T_PCH_LPT:
		case WM_T_PCH_SPT:
			/* TARC0 */
			if ((sc->sc_type == WM_T_ICH8)
			    || (sc->sc_type == WM_T_PCH_SPT)) {
				/* Set TARC0 bits 29 and 28 */
				tarc0 |= __BITS(29, 28);
			}
			/* Set TARC0 bits 23,24,26,27 */
			tarc0 |= __BITS(27, 26) | __BITS(24, 23);

			/* CTRL_EXT */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg |= __BIT(22);	/* Set bit 22 */
			/*
			 * Enable PHY low-power state when MAC is at D3
			 * w/o WoL
			 */
			if (sc->sc_type >= WM_T_PCH)
				reg |= CTRL_EXT_PHYPDEN;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

			/* TARC1 */
			tarc1 = CSR_READ(sc, WMREG_TARC1);
			/* bit 28 */
			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
				tarc1 &= ~__BIT(28);
			else
				tarc1 |= __BIT(28);
			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
			CSR_WRITE(sc, WMREG_TARC1, tarc1);

			/* Device Status */
			if (sc->sc_type == WM_T_ICH8) {
				reg = CSR_READ(sc, WMREG_STATUS);
				reg &= ~__BIT(31);
				CSR_WRITE(sc, WMREG_STATUS, reg);

			}

			/* IOSFPC */
			if (sc->sc_type == WM_T_PCH_SPT) {
				reg = CSR_READ(sc, WMREG_IOSFPC);
				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
				CSR_WRITE(sc, WMREG_IOSFPC, reg);
			}
			/*
			 * Work-around descriptor data corruption issue during
			 * NFS v2 UDP traffic, just disable the NFS filtering
			 * capability.
			 */
			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
			CSR_WRITE(sc, WMREG_RFCTL, reg);
			break;
		default:
			break;
		}
		CSR_WRITE(sc, WMREG_TARC0, tarc0);

		/*
		 * 8257[12] Errata No.52 and some others.
		 * Avoid RSS Hash Value bug.
		 */
		switch (sc->sc_type) {
		case WM_T_82571:
		case WM_T_82572:
		case WM_T_82573:
		case WM_T_80003:
		case WM_T_ICH8:
			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
			CSR_WRITE(sc, WMREG_RFCTL, reg);
			break;
		default:
			break;
		}
	}
}
   3646 
   3647 static uint32_t
   3648 wm_rxpbs_adjust_82580(uint32_t val)
   3649 {
   3650 	uint32_t rv = 0;
   3651 
   3652 	if (val < __arraycount(wm_82580_rxpbs_table))
   3653 		rv = wm_82580_rxpbs_table[val];
   3654 
   3655 	return rv;
   3656 }
   3657 
   3658 /*
   3659  * wm_reset:
   3660  *
   3661  *	Reset the i82542 chip.
   3662  */
   3663 static void
   3664 wm_reset(struct wm_softc *sc)
   3665 {
   3666 	int phy_reset = 0;
   3667 	int i, error = 0;
   3668 	uint32_t reg, mask;
   3669 
   3670 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3671 		device_xname(sc->sc_dev), __func__));
   3672 	/*
   3673 	 * Allocate on-chip memory according to the MTU size.
   3674 	 * The Packet Buffer Allocation register must be written
   3675 	 * before the chip is reset.
   3676 	 */
   3677 	switch (sc->sc_type) {
   3678 	case WM_T_82547:
   3679 	case WM_T_82547_2:
   3680 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3681 		    PBA_22K : PBA_30K;
   3682 		for (i = 0; i < sc->sc_nqueues; i++) {
   3683 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3684 			txq->txq_fifo_head = 0;
   3685 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3686 			txq->txq_fifo_size =
   3687 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3688 			txq->txq_fifo_stall = 0;
   3689 		}
   3690 		break;
   3691 	case WM_T_82571:
   3692 	case WM_T_82572:
   3693 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   3694 	case WM_T_80003:
   3695 		sc->sc_pba = PBA_32K;
   3696 		break;
   3697 	case WM_T_82573:
   3698 		sc->sc_pba = PBA_12K;
   3699 		break;
   3700 	case WM_T_82574:
   3701 	case WM_T_82583:
   3702 		sc->sc_pba = PBA_20K;
   3703 		break;
   3704 	case WM_T_82576:
   3705 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3706 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3707 		break;
   3708 	case WM_T_82580:
   3709 	case WM_T_I350:
   3710 	case WM_T_I354:
   3711 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3712 		break;
   3713 	case WM_T_I210:
   3714 	case WM_T_I211:
   3715 		sc->sc_pba = PBA_34K;
   3716 		break;
   3717 	case WM_T_ICH8:
   3718 		/* Workaround for a bit corruption issue in FIFO memory */
   3719 		sc->sc_pba = PBA_8K;
   3720 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3721 		break;
   3722 	case WM_T_ICH9:
   3723 	case WM_T_ICH10:
   3724 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3725 		    PBA_14K : PBA_10K;
   3726 		break;
   3727 	case WM_T_PCH:
   3728 	case WM_T_PCH2:
   3729 	case WM_T_PCH_LPT:
   3730 	case WM_T_PCH_SPT:
   3731 		sc->sc_pba = PBA_26K;
   3732 		break;
   3733 	default:
   3734 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3735 		    PBA_40K : PBA_48K;
   3736 		break;
   3737 	}
   3738 	/*
   3739 	 * Only old or non-multiqueue devices have the PBA register
   3740 	 * XXX Need special handling for 82575.
   3741 	 */
   3742 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3743 	    || (sc->sc_type == WM_T_82575))
   3744 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3745 
   3746 	/* Prevent the PCI-E bus from sticking */
   3747 	if (sc->sc_flags & WM_F_PCIE) {
   3748 		int timeout = 800;
   3749 
   3750 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3751 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3752 
   3753 		while (timeout--) {
   3754 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3755 			    == 0)
   3756 				break;
   3757 			delay(100);
   3758 		}
   3759 	}
   3760 
   3761 	/* Set the completion timeout for interface */
   3762 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3763 	    || (sc->sc_type == WM_T_82580)
   3764 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3765 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3766 		wm_set_pcie_completion_timeout(sc);
   3767 
   3768 	/* Clear interrupt */
   3769 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3770 	if (sc->sc_nintrs > 1) {
   3771 		if (sc->sc_type != WM_T_82574) {
   3772 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3773 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3774 		} else {
   3775 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3776 		}
   3777 	}
   3778 
   3779 	/* Stop the transmit and receive processes. */
   3780 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3781 	sc->sc_rctl &= ~RCTL_EN;
   3782 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3783 	CSR_WRITE_FLUSH(sc);
   3784 
   3785 	/* XXX set_tbi_sbp_82543() */
   3786 
   3787 	delay(10*1000);
   3788 
   3789 	/* Must acquire the MDIO ownership before MAC reset */
   3790 	switch (sc->sc_type) {
   3791 	case WM_T_82573:
   3792 	case WM_T_82574:
   3793 	case WM_T_82583:
   3794 		error = wm_get_hw_semaphore_82573(sc);
   3795 		break;
   3796 	default:
   3797 		break;
   3798 	}
   3799 
   3800 	/*
   3801 	 * 82541 Errata 29? & 82547 Errata 28?
   3802 	 * See also the description about PHY_RST bit in CTRL register
   3803 	 * in 8254x_GBe_SDM.pdf.
   3804 	 */
   3805 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3806 		CSR_WRITE(sc, WMREG_CTRL,
   3807 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3808 		CSR_WRITE_FLUSH(sc);
   3809 		delay(5000);
   3810 	}
   3811 
   3812 	switch (sc->sc_type) {
   3813 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3814 	case WM_T_82541:
   3815 	case WM_T_82541_2:
   3816 	case WM_T_82547:
   3817 	case WM_T_82547_2:
   3818 		/*
   3819 		 * On some chipsets, a reset through a memory-mapped write
   3820 		 * cycle can cause the chip to reset before completing the
   3821 		 * write cycle.  This causes major headache that can be
   3822 		 * avoided by issuing the reset via indirect register writes
   3823 		 * through I/O space.
   3824 		 *
   3825 		 * So, if we successfully mapped the I/O BAR at attach time,
   3826 		 * use that.  Otherwise, try our luck with a memory-mapped
   3827 		 * reset.
   3828 		 */
   3829 		if (sc->sc_flags & WM_F_IOH_VALID)
   3830 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3831 		else
   3832 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3833 		break;
   3834 	case WM_T_82545_3:
   3835 	case WM_T_82546_3:
   3836 		/* Use the shadow control register on these chips. */
   3837 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3838 		break;
   3839 	case WM_T_80003:
   3840 		mask = swfwphysem[sc->sc_funcid];
   3841 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3842 		wm_get_swfw_semaphore(sc, mask);
   3843 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3844 		wm_put_swfw_semaphore(sc, mask);
   3845 		break;
   3846 	case WM_T_ICH8:
   3847 	case WM_T_ICH9:
   3848 	case WM_T_ICH10:
   3849 	case WM_T_PCH:
   3850 	case WM_T_PCH2:
   3851 	case WM_T_PCH_LPT:
   3852 	case WM_T_PCH_SPT:
   3853 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3854 		if (wm_phy_resetisblocked(sc) == false) {
   3855 			/*
   3856 			 * Gate automatic PHY configuration by hardware on
   3857 			 * non-managed 82579
   3858 			 */
   3859 			if ((sc->sc_type == WM_T_PCH2)
   3860 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3861 				== 0))
   3862 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3863 
   3864 			reg |= CTRL_PHY_RESET;
   3865 			phy_reset = 1;
   3866 		} else
   3867 			printf("XXX reset is blocked!!!\n");
   3868 		wm_get_swfwhw_semaphore(sc);
   3869 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3870 		/* Don't insert a completion barrier when reset */
   3871 		delay(20*1000);
   3872 		wm_put_swfwhw_semaphore(sc);
   3873 		break;
   3874 	case WM_T_82580:
   3875 	case WM_T_I350:
   3876 	case WM_T_I354:
   3877 	case WM_T_I210:
   3878 	case WM_T_I211:
   3879 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3880 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3881 			CSR_WRITE_FLUSH(sc);
   3882 		delay(5000);
   3883 		break;
   3884 	case WM_T_82542_2_0:
   3885 	case WM_T_82542_2_1:
   3886 	case WM_T_82543:
   3887 	case WM_T_82540:
   3888 	case WM_T_82545:
   3889 	case WM_T_82546:
   3890 	case WM_T_82571:
   3891 	case WM_T_82572:
   3892 	case WM_T_82573:
   3893 	case WM_T_82574:
   3894 	case WM_T_82575:
   3895 	case WM_T_82576:
   3896 	case WM_T_82583:
   3897 	default:
   3898 		/* Everything else can safely use the documented method. */
   3899 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3900 		break;
   3901 	}
   3902 
   3903 	/* Must release the MDIO ownership after MAC reset */
   3904 	switch (sc->sc_type) {
   3905 	case WM_T_82573:
   3906 	case WM_T_82574:
   3907 	case WM_T_82583:
   3908 		if (error == 0)
   3909 			wm_put_hw_semaphore_82573(sc);
   3910 		break;
   3911 	default:
   3912 		break;
   3913 	}
   3914 
   3915 	if (phy_reset != 0)
   3916 		wm_get_cfg_done(sc);
   3917 
   3918 	/* reload EEPROM */
   3919 	switch (sc->sc_type) {
   3920 	case WM_T_82542_2_0:
   3921 	case WM_T_82542_2_1:
   3922 	case WM_T_82543:
   3923 	case WM_T_82544:
   3924 		delay(10);
   3925 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3926 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3927 		CSR_WRITE_FLUSH(sc);
   3928 		delay(2000);
   3929 		break;
   3930 	case WM_T_82540:
   3931 	case WM_T_82545:
   3932 	case WM_T_82545_3:
   3933 	case WM_T_82546:
   3934 	case WM_T_82546_3:
   3935 		delay(5*1000);
   3936 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3937 		break;
   3938 	case WM_T_82541:
   3939 	case WM_T_82541_2:
   3940 	case WM_T_82547:
   3941 	case WM_T_82547_2:
   3942 		delay(20000);
   3943 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3944 		break;
   3945 	case WM_T_82571:
   3946 	case WM_T_82572:
   3947 	case WM_T_82573:
   3948 	case WM_T_82574:
   3949 	case WM_T_82583:
   3950 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3951 			delay(10);
   3952 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3953 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3954 			CSR_WRITE_FLUSH(sc);
   3955 		}
   3956 		/* check EECD_EE_AUTORD */
   3957 		wm_get_auto_rd_done(sc);
   3958 		/*
   3959 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   3960 		 * is set.
   3961 		 */
   3962 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3963 		    || (sc->sc_type == WM_T_82583))
   3964 			delay(25*1000);
   3965 		break;
   3966 	case WM_T_82575:
   3967 	case WM_T_82576:
   3968 	case WM_T_82580:
   3969 	case WM_T_I350:
   3970 	case WM_T_I354:
   3971 	case WM_T_I210:
   3972 	case WM_T_I211:
   3973 	case WM_T_80003:
   3974 		/* check EECD_EE_AUTORD */
   3975 		wm_get_auto_rd_done(sc);
   3976 		break;
   3977 	case WM_T_ICH8:
   3978 	case WM_T_ICH9:
   3979 	case WM_T_ICH10:
   3980 	case WM_T_PCH:
   3981 	case WM_T_PCH2:
   3982 	case WM_T_PCH_LPT:
   3983 	case WM_T_PCH_SPT:
   3984 		break;
   3985 	default:
   3986 		panic("%s: unknown type\n", __func__);
   3987 	}
   3988 
   3989 	/* Check whether EEPROM is present or not */
   3990 	switch (sc->sc_type) {
   3991 	case WM_T_82575:
   3992 	case WM_T_82576:
   3993 	case WM_T_82580:
   3994 	case WM_T_I350:
   3995 	case WM_T_I354:
   3996 	case WM_T_ICH8:
   3997 	case WM_T_ICH9:
   3998 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3999 			/* Not found */
   4000 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4001 			if (sc->sc_type == WM_T_82575)
   4002 				wm_reset_init_script_82575(sc);
   4003 		}
   4004 		break;
   4005 	default:
   4006 		break;
   4007 	}
   4008 
   4009 	if ((sc->sc_type == WM_T_82580)
   4010 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4011 		/* clear global device reset status bit */
   4012 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4013 	}
   4014 
   4015 	/* Clear any pending interrupt events. */
   4016 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4017 	reg = CSR_READ(sc, WMREG_ICR);
   4018 	if (sc->sc_nintrs > 1) {
   4019 		if (sc->sc_type != WM_T_82574) {
   4020 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4021 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4022 		} else
   4023 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4024 	}
   4025 
   4026 	/* reload sc_ctrl */
   4027 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4028 
   4029 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4030 		wm_set_eee_i350(sc);
   4031 
   4032 	/* dummy read from WUC */
   4033 	if (sc->sc_type == WM_T_PCH)
   4034 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4035 	/*
   4036 	 * For PCH, this write will make sure that any noise will be detected
   4037 	 * as a CRC error and be dropped rather than show up as a bad packet
   4038 	 * to the DMA engine
   4039 	 */
   4040 	if (sc->sc_type == WM_T_PCH)
   4041 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4042 
   4043 	if (sc->sc_type >= WM_T_82544)
   4044 		CSR_WRITE(sc, WMREG_WUC, 0);
   4045 
   4046 	wm_reset_mdicnfg_82580(sc);
   4047 
   4048 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4049 		wm_pll_workaround_i210(sc);
   4050 }
   4051 
   4052 /*
   4053  * wm_add_rxbuf:
   4054  *
   4055  *	Add a receive buffer to the indiciated descriptor.
   4056  */
   4057 static int
   4058 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4059 {
   4060 	struct wm_softc *sc = rxq->rxq_sc;
   4061 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4062 	struct mbuf *m;
   4063 	int error;
   4064 
   4065 	KASSERT(mutex_owned(rxq->rxq_lock));
   4066 
   4067 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4068 	if (m == NULL)
   4069 		return ENOBUFS;
   4070 
   4071 	MCLGET(m, M_DONTWAIT);
   4072 	if ((m->m_flags & M_EXT) == 0) {
   4073 		m_freem(m);
   4074 		return ENOBUFS;
   4075 	}
   4076 
   4077 	if (rxs->rxs_mbuf != NULL)
   4078 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4079 
   4080 	rxs->rxs_mbuf = m;
   4081 
   4082 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4083 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4084 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4085 	if (error) {
   4086 		/* XXX XXX XXX */
   4087 		aprint_error_dev(sc->sc_dev,
   4088 		    "unable to load rx DMA map %d, error = %d\n",
   4089 		    idx, error);
   4090 		panic("wm_add_rxbuf");
   4091 	}
   4092 
   4093 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4094 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4095 
   4096 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4097 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4098 			wm_init_rxdesc(rxq, idx);
   4099 	} else
   4100 		wm_init_rxdesc(rxq, idx);
   4101 
   4102 	return 0;
   4103 }
   4104 
   4105 /*
   4106  * wm_rxdrain:
   4107  *
   4108  *	Drain the receive queue.
   4109  */
   4110 static void
   4111 wm_rxdrain(struct wm_rxqueue *rxq)
   4112 {
   4113 	struct wm_softc *sc = rxq->rxq_sc;
   4114 	struct wm_rxsoft *rxs;
   4115 	int i;
   4116 
   4117 	KASSERT(mutex_owned(rxq->rxq_lock));
   4118 
   4119 	for (i = 0; i < WM_NRXDESC; i++) {
   4120 		rxs = &rxq->rxq_soft[i];
   4121 		if (rxs->rxs_mbuf != NULL) {
   4122 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4123 			m_freem(rxs->rxs_mbuf);
   4124 			rxs->rxs_mbuf = NULL;
   4125 		}
   4126 	}
   4127 }
   4128 
   4129 
   4130 /*
   4131  * XXX copy from FreeBSD's sys/net/rss_config.c
   4132  */
   4133 /*
   4134  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4135  * effectiveness may be limited by algorithm choice and available entropy
   4136  * during the boot.
   4137  *
   4138  * XXXRW: And that we don't randomize it yet!
   4139  *
   4140  * This is the default Microsoft RSS specification key which is also
   4141  * the Chelsio T5 firmware default key.
   4142  */
#define RSS_KEYSIZE 40
/* 40-byte key loaded into the WMREG_RSSRK registers by wm_init_rss(). */
static uint8_t wm_rss_key[RSS_KEYSIZE] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
};
   4151 
   4152 /*
   4153  * Caller must pass an array of size sizeof(rss_key).
   4154  *
   4155  * XXX
   4156  * As if_ixgbe may use this function, this function should not be
   4157  * if_wm specific function.
   4158  */
   4159 static void
   4160 wm_rss_getkey(uint8_t *key)
   4161 {
   4162 
   4163 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4164 }
   4165 
   4166 /*
   4167  * Setup registers for RSS.
   4168  *
   4169  * XXX not yet VMDq support
   4170  */
   4171 static void
   4172 wm_init_rss(struct wm_softc *sc)
   4173 {
   4174 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4175 	int i;
   4176 
   4177 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4178 
   4179 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4180 		int qid, reta_ent;
   4181 
   4182 		qid  = i % sc->sc_nqueues;
   4183 		switch(sc->sc_type) {
   4184 		case WM_T_82574:
   4185 			reta_ent = __SHIFTIN(qid,
   4186 			    RETA_ENT_QINDEX_MASK_82574);
   4187 			break;
   4188 		case WM_T_82575:
   4189 			reta_ent = __SHIFTIN(qid,
   4190 			    RETA_ENT_QINDEX1_MASK_82575);
   4191 			break;
   4192 		default:
   4193 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4194 			break;
   4195 		}
   4196 
   4197 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4198 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4199 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4200 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4201 	}
   4202 
   4203 	wm_rss_getkey((uint8_t *)rss_key);
   4204 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4205 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4206 
   4207 	if (sc->sc_type == WM_T_82574)
   4208 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4209 	else
   4210 		mrqc = MRQC_ENABLE_RSS_MQ;
   4211 
   4212 	/* XXXX
   4213 	 * The same as FreeBSD igb.
   4214 	 * Why doesn't use MRQC_RSS_FIELD_IPV6_EX?
   4215 	 */
   4216 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4217 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4218 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4219 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4220 
   4221 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4222 }
   4223 
   4224 /*
   4225  * Adjust TX and RX queue numbers which the system actulally uses.
   4226  *
   4227  * The numbers are affected by below parameters.
   4228  *     - The nubmer of hardware queues
   4229  *     - The number of MSI-X vectors (= "nvectors" argument)
   4230  *     - ncpu
   4231  */
   4232 static void
   4233 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4234 {
   4235 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4236 
   4237 	if (nvectors < 2) {
   4238 		sc->sc_nqueues = 1;
   4239 		return;
   4240 	}
   4241 
   4242 	switch(sc->sc_type) {
   4243 	case WM_T_82572:
   4244 		hw_ntxqueues = 2;
   4245 		hw_nrxqueues = 2;
   4246 		break;
   4247 	case WM_T_82574:
   4248 		hw_ntxqueues = 2;
   4249 		hw_nrxqueues = 2;
   4250 		break;
   4251 	case WM_T_82575:
   4252 		hw_ntxqueues = 4;
   4253 		hw_nrxqueues = 4;
   4254 		break;
   4255 	case WM_T_82576:
   4256 		hw_ntxqueues = 16;
   4257 		hw_nrxqueues = 16;
   4258 		break;
   4259 	case WM_T_82580:
   4260 	case WM_T_I350:
   4261 	case WM_T_I354:
   4262 		hw_ntxqueues = 8;
   4263 		hw_nrxqueues = 8;
   4264 		break;
   4265 	case WM_T_I210:
   4266 		hw_ntxqueues = 4;
   4267 		hw_nrxqueues = 4;
   4268 		break;
   4269 	case WM_T_I211:
   4270 		hw_ntxqueues = 2;
   4271 		hw_nrxqueues = 2;
   4272 		break;
   4273 		/*
   4274 		 * As below ethernet controllers does not support MSI-X,
   4275 		 * this driver let them not use multiqueue.
   4276 		 *     - WM_T_80003
   4277 		 *     - WM_T_ICH8
   4278 		 *     - WM_T_ICH9
   4279 		 *     - WM_T_ICH10
   4280 		 *     - WM_T_PCH
   4281 		 *     - WM_T_PCH2
   4282 		 *     - WM_T_PCH_LPT
   4283 		 */
   4284 	default:
   4285 		hw_ntxqueues = 1;
   4286 		hw_nrxqueues = 1;
   4287 		break;
   4288 	}
   4289 
   4290 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4291 
   4292 	/*
   4293 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
   4294 	 * the number of queues used actually.
   4295 	 */
   4296 	if (nvectors < hw_nqueues + 1) {
   4297 		sc->sc_nqueues = nvectors - 1;
   4298 	} else {
   4299 		sc->sc_nqueues = hw_nqueues;
   4300 	}
   4301 
   4302 	/*
   4303 	 * As queues more then cpus cannot improve scaling, we limit
   4304 	 * the number of queues used actually.
   4305 	 */
   4306 	if (ncpu < sc->sc_nqueues)
   4307 		sc->sc_nqueues = ncpu;
   4308 }
   4309 
   4310 /*
   4311  * Both single interrupt MSI and INTx can use this function.
   4312  */
   4313 static int
   4314 wm_setup_legacy(struct wm_softc *sc)
   4315 {
   4316 	pci_chipset_tag_t pc = sc->sc_pc;
   4317 	const char *intrstr = NULL;
   4318 	char intrbuf[PCI_INTRSTR_LEN];
   4319 	int error;
   4320 
   4321 	error = wm_alloc_txrx_queues(sc);
   4322 	if (error) {
   4323 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4324 		    error);
   4325 		return ENOMEM;
   4326 	}
   4327 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4328 	    sizeof(intrbuf));
   4329 #ifdef WM_MPSAFE
   4330 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4331 #endif
   4332 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4333 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4334 	if (sc->sc_ihs[0] == NULL) {
   4335 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4336 		    (pci_intr_type(sc->sc_intrs[0])
   4337 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4338 		return ENOMEM;
   4339 	}
   4340 
   4341 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4342 	sc->sc_nintrs = 1;
   4343 	return 0;
   4344 }
   4345 
/*
 * wm_setup_msix:
 *
 *	Allocate the Tx/Rx queues and establish one MSI-X handler per
 *	Tx/Rx queue pair plus one handler for link-status changes.
 *	Tx/Rx vectors are distributed round-robin over the CPUs.
 *
 *	Returns 0 on success, ENOMEM on failure (all Tx/Rx handlers
 *	established so far are torn down again on the failure path).
 */
static int
wm_setup_msix(struct wm_softc *sc)
{
	void *vih;
	kcpuset_t *affinity;
	int qidx, error, intr_idx, txrx_established;
	pci_chipset_tag_t pc = sc->sc_pc;
	const char *intrstr = NULL;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];

	if (sc->sc_nqueues < ncpu) {
		/*
		 * To avoid other devices' interrupts, the affinity of Tx/Rx
		 * interrupts start from CPU#1.
		 */
		sc->sc_affinity_offset = 1;
	} else {
		/*
		 * In this case the device uses all CPUs, so we match the
		 * affinitized cpu_index to the MSI-X vector number for
		 * readability.
		 */
		sc->sc_affinity_offset = 0;
	}

	error = wm_alloc_txrx_queues(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
		    error);
		return ENOMEM;
	}

	kcpuset_create(&affinity, false);
	intr_idx = 0;

	/*
	 * TX and RX
	 */
	/* Count of vectors established so far; used for rollback on fail. */
	txrx_established = 0;
	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
		struct wm_queue *wmq = &sc->sc_queue[qidx];
		/* Round-robin CPU assignment starting at the offset above. */
		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;

		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
		    sizeof(intrbuf));
#ifdef WM_MPSAFE
		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
		    PCI_INTR_MPSAFE, true);
#endif
		memset(intr_xname, 0, sizeof(intr_xname));
		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
		    device_xname(sc->sc_dev), qidx);
		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
		if (vih == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to establish MSI-X(for TX and RX)%s%s\n",
			    intrstr ? " at " : "",
			    intrstr ? intrstr : "");

			goto fail;
		}
		kcpuset_zero(affinity);
		/* Round-robin affinity */
		kcpuset_set(affinity, affinity_to);
		/* Affinity is best-effort; fall back to default placement. */
		error = interrupt_distribute(vih, affinity, NULL);
		if (error == 0) {
			aprint_normal_dev(sc->sc_dev,
			    "for TX and RX interrupting at %s affinity to %u\n",
			    intrstr, affinity_to);
		} else {
			aprint_normal_dev(sc->sc_dev,
			    "for TX and RX interrupting at %s\n", intrstr);
		}
		sc->sc_ihs[intr_idx] = vih;
		wmq->wmq_id= qidx;
		wmq->wmq_intr_idx = intr_idx;

		txrx_established++;
		intr_idx++;
	}

	/*
	 * LINK
	 */
	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
	    sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
#endif
	memset(intr_xname, 0, sizeof(intr_xname));
	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
	    device_xname(sc->sc_dev));
	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
	if (vih == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to establish MSI-X(for LINK)%s%s\n",
		    intrstr ? " at " : "",
		    intrstr ? intrstr : "");

		goto fail;
	}
	/* keep default affinity to LINK interrupt */
	aprint_normal_dev(sc->sc_dev,
	    "for LINK interrupting at %s\n", intrstr);
	sc->sc_ihs[intr_idx] = vih;
	sc->sc_link_intr_idx = intr_idx;

	sc->sc_nintrs = sc->sc_nqueues + 1;
	kcpuset_destroy(affinity);
	return 0;

 fail:
	/* Tear down every Tx/Rx vector established before the failure. */
	for (qidx = 0; qidx < txrx_established; qidx++) {
		struct wm_queue *wmq = &sc->sc_queue[qidx];
		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
	}

	kcpuset_destroy(affinity);
	return ENOMEM;
}
   4469 
   4470 /*
   4471  * wm_init:		[ifnet interface function]
   4472  *
   4473  *	Initialize the interface.
   4474  */
   4475 static int
   4476 wm_init(struct ifnet *ifp)
   4477 {
   4478 	struct wm_softc *sc = ifp->if_softc;
   4479 	int ret;
   4480 
   4481 	WM_CORE_LOCK(sc);
   4482 	ret = wm_init_locked(ifp);
   4483 	WM_CORE_UNLOCK(sc);
   4484 
   4485 	return ret;
   4486 }
   4487 
/*
 * wm_init_locked:
 *
 *	Initialize the interface with the core lock held: reset the
 *	chip and PHY, program interrupt throttling and (for MSI-X)
 *	vector routing, flow control, checksum offload, Tx/Rx control
 *	registers, and the receive filter, then mark the interface
 *	running and start the one-second tick callout.
 *
 *	Returns 0 on success, or an error from queue init / media change.
 */
static int
wm_init_locked(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	int i, j, trynum, error = 0;
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(WM_CORE_LOCKED(sc));
	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop_locked(ifp, 0);

	/* update statistics before reset */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		/* AMT based hardware can now take control from firmware */
		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/* Init hardware bits */
	wm_initialize_hardware_bits(sc);

	/* Reset the PHY. */
	if (sc->sc_flags & WM_F_HAS_MII)
		wm_gmii_reset(sc);

	/* Calculate (E)ITR value */
	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		sc->sc_itr = 450;	/* For EITR */
	} else if (sc->sc_type >= WM_T_82543) {
		/*
		 * Set up the interrupt throttling register (units of 256ns)
		 * Note that a footnote in Intel's documentation says this
		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode.  Empirically, it appears to be the case
		 * that that is also true for the 1024ns units of the other
		 * interrupt-related timer registers -- so, really, we ought
		 * to divide this value by 4 when the link speed is low.
		 *
		 * XXX implement this division at link speed change!
		 */

		/*
		 * For N interrupts/sec, set this value to:
		 * 1000000000 / (N * 256).  Note that we set the
		 * absolute and packet timer values to this value
		 * divided by 4 to get "simple timer" behavior.
		 */

		sc->sc_itr = 1500;		/* 2604 ints/sec */
	}

	error = wm_init_txrx_queues(sc);
	if (error)
		goto out;

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		trynum = 10; /* Due to hw errata */
	else
		trynum = 1;
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		for (j = 0; j < trynum; j++)
			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	/* ICH/PCH variants don't take these constants; skip them there. */
	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
	    && (sc->sc_type != WM_T_PCH_SPT)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
	}

	sc->sc_fcrtl = FCRTL_DFLT;
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
	} else {
		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
	}

	if (sc->sc_type == WM_T_80003)
		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
	else
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);

	/* Writes the control register. */
	wm_set_vlan(sc);

	if (sc->sc_flags & WM_F_HAS_MII) {
		int val;

		switch (sc->sc_type) {
		case WM_T_80003:
		case WM_T_ICH8:
		case WM_T_ICH9:
		case WM_T_ICH10:
		case WM_T_PCH:
		case WM_T_PCH2:
		case WM_T_PCH_LPT:
		case WM_T_PCH_SPT:
			/*
			 * Set the mac to wait the maximum time between each
			 * iteration and increase the max iterations when
			 * polling the phy; this fixes erroneous timeouts at
			 * 10Mbps.
			 */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
			    0xFFFF);
			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
			val |= 0x3F;
			wm_kmrn_writereg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
			break;
		default:
			break;
		}

		if (sc->sc_type == WM_T_80003) {
			val = CSR_READ(sc, WMREG_CTRL_EXT);
			val &= ~CTRL_EXT_LINK_MODE_MASK;
			CSR_WRITE(sc, WMREG_CTRL_EXT, val);

			/* Bypass RX and TX FIFO's */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
		}
	}
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/* Set up checksum offload parameters. */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/* Set up MSI-X */
	/*
	 * Route each Tx/Rx queue pair and the link event to its MSI-X
	 * vector.  The register layout differs per chip generation:
	 * 82575 uses MSIXBM, 82574 uses a single IVAR, newer chips use
	 * per-queue IVAR_Q registers.
	 */
	if (sc->sc_nintrs > 1) {
		uint32_t ivar;
		struct wm_queue *wmq;
		int qid, qintr_idx;

		if (sc->sc_type == WM_T_82575) {
			/* Interrupt control */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

			/* TX and RX */
			for (i = 0; i < sc->sc_nqueues; i++) {
				wmq = &sc->sc_queue[i];
				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
				    EITR_TX_QUEUE(wmq->wmq_id)
				    | EITR_RX_QUEUE(wmq->wmq_id));
			}
			/* Link status */
			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
			    EITR_OTHER);
		} else if (sc->sc_type == WM_T_82574) {
			/* Interrupt control */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

			ivar = 0;
			/* TX and RX */
			for (i = 0; i < sc->sc_nqueues; i++) {
				wmq = &sc->sc_queue[i];
				qid = wmq->wmq_id;
				qintr_idx = wmq->wmq_intr_idx;

				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
				    IVAR_TX_MASK_Q_82574(qid));
				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
				    IVAR_RX_MASK_Q_82574(qid));
			}
			/* Link status */
			ivar |= __SHIFTIN((IVAR_VALID_82574
				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
		} else {
			/* Interrupt control */
			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
			    | GPIE_EIAME | GPIE_PBA);

			switch (sc->sc_type) {
			case WM_T_82580:
			case WM_T_I350:
			case WM_T_I354:
			case WM_T_I210:
			case WM_T_I211:
				/* TX and RX */
				for (i = 0; i < sc->sc_nqueues; i++) {
					wmq = &sc->sc_queue[i];
					qid = wmq->wmq_id;
					qintr_idx = wmq->wmq_intr_idx;

					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
					ivar &= ~IVAR_TX_MASK_Q(qid);
					ivar |= __SHIFTIN((qintr_idx
						| IVAR_VALID),
					    IVAR_TX_MASK_Q(qid));
					ivar &= ~IVAR_RX_MASK_Q(qid);
					ivar |= __SHIFTIN((qintr_idx
						| IVAR_VALID),
					    IVAR_RX_MASK_Q(qid));
					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
				}
				break;
			case WM_T_82576:
				/* TX and RX */
				for (i = 0; i < sc->sc_nqueues; i++) {
					wmq = &sc->sc_queue[i];
					qid = wmq->wmq_id;
					qintr_idx = wmq->wmq_intr_idx;

					ivar = CSR_READ(sc,
					    WMREG_IVAR_Q_82576(qid));
					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
					ivar |= __SHIFTIN((qintr_idx
						| IVAR_VALID),
					    IVAR_TX_MASK_Q_82576(qid));
					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
					ivar |= __SHIFTIN((qintr_idx
						| IVAR_VALID),
					    IVAR_RX_MASK_Q_82576(qid));
					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
					    ivar);
				}
				break;
			default:
				break;
			}

			/* Link status */
			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
			    IVAR_MISC_OTHER);
			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
		}

		if (sc->sc_nqueues > 1) {
			wm_init_rss(sc);

			/*
			** NOTE: Receive Full-Packet Checksum Offload
			** is mutually exclusive with Multiqueue. However
			** this is not the same as TCP/IP checksums which
			** still work.
			*/
			reg = CSR_READ(sc, WMREG_RXCSUM);
			reg |= RXCSUM_PCSD;
			CSR_WRITE(sc, WMREG_RXCSUM, reg);
		}
	}

	/* Set up the interrupt registers. */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if (sc->sc_nintrs > 1) {
		uint32_t mask;
		struct wm_queue *wmq;

		switch (sc->sc_type) {
		case WM_T_82574:
			CSR_WRITE(sc, WMREG_EIAC_82574,
			    WMREG_EIAC_82574_MSIX_MASK);
			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
			break;
		default:
			if (sc->sc_type == WM_T_82575) {
				/* 82575 masks are per-queue EITR bits. */
				mask = 0;
				for (i = 0; i < sc->sc_nqueues; i++) {
					wmq = &sc->sc_queue[i];
					mask |= EITR_TX_QUEUE(wmq->wmq_id);
					mask |= EITR_RX_QUEUE(wmq->wmq_id);
				}
				mask |= EITR_OTHER;
			} else {
				/* Newer chips: one bit per MSI-X vector. */
				mask = 0;
				for (i = 0; i < sc->sc_nqueues; i++) {
					wmq = &sc->sc_queue[i];
					mask |= 1 << wmq->wmq_intr_idx;
				}
				mask |= 1 << sc->sc_link_intr_idx;
			}
			CSR_WRITE(sc, WMREG_EIAC, mask);
			CSR_WRITE(sc, WMREG_EIAM, mask);
			CSR_WRITE(sc, WMREG_EIMS, mask);
			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
			break;
		}
	} else
		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
	    || (sc->sc_type == WM_T_PCH_SPT)) {
		reg = CSR_READ(sc, WMREG_KABGTXD);
		reg |= KABGTXD_BGSQLBIAS;
		CSR_WRITE(sc, WMREG_KABGTXD, reg);
	}

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

	if (sc->sc_type >= WM_T_82543) {
		/*
		 * XXX 82574 has both ITR and EITR. SET EITR when we use
		 * the multi queue function with MSI-X.
		 */
		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
			int qidx;
			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
				struct wm_queue *wmq = &sc->sc_queue[qidx];
				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
				    sc->sc_itr);
			}
			/*
			 * Link interrupts occur much less than TX
			 * interrupts and RX interrupts. So, we don't
			 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
			 * FreeBSD's if_igb.
			 */
		} else
			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
	}

	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
	    | TCTL_CT(TX_COLLISION_THRESHOLD)
	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set. See the document. */
		CSR_WRITE(sc, WMREG_TDT(0), 0);
	}

	if (sc->sc_type == WM_T_80003) {
		reg = CSR_READ(sc, WMREG_TCTL_EXT);
		reg &= ~TCTL_EXT_GCEX_MASK;
		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
	}

	/* Set the media. */
	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
		goto out;

	/* Configure for OS presence */
	wm_init_manageability(sc);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	/*
	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not. So ask for stripped CRC here and cope in rxeof
	 */
	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210))
		sc->sc_rctl |= RCTL_SECRC;

	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
	    && (ifp->if_mtu > ETHERMTU)) {
		sc->sc_rctl |= RCTL_LPE;
		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
	}

	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch (MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Enable ECC */
	switch (sc->sc_type) {
	case WM_T_82571:
		reg = CSR_READ(sc, WMREG_PBA_ECC);
		reg |= PBA_ECC_CORR_EN;
		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
		break;
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		reg = CSR_READ(sc, WMREG_PBECCSTS);
		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
		CSR_WRITE(sc, WMREG_PBECCSTS, reg);

		reg = CSR_READ(sc, WMREG_CTRL);
		reg |= CTRL_MEHE;
		CSR_WRITE(sc, WMREG_CTRL, reg);
		break;
	default:
		break;
	}

	/* On 575 and later set RDT only if RX enabled */
	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		int qidx;
		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
			for (i = 0; i < WM_NRXDESC; i++) {
				mutex_enter(rxq->rxq_lock);
				wm_init_rxdesc(rxq, i);
				mutex_exit(rxq->rxq_lock);

			}
		}
	}

	sc->sc_stopping = false;

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	/* Remember the flags we ran with so ioctl can detect changes. */
	sc->sc_if_flags = ifp->if_flags;
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    device_xname(sc->sc_dev));
	return error;
}
   5019 
   5020 /*
   5021  * wm_stop:		[ifnet interface function]
   5022  *
   5023  *	Stop transmission on the interface.
   5024  */
   5025 static void
   5026 wm_stop(struct ifnet *ifp, int disable)
   5027 {
   5028 	struct wm_softc *sc = ifp->if_softc;
   5029 
   5030 	WM_CORE_LOCK(sc);
   5031 	wm_stop_locked(ifp, disable);
   5032 	WM_CORE_UNLOCK(sc);
   5033 }
   5034 
/*
 * wm_stop_locked:
 *
 *	Stop the interface with the core lock held: stop the callouts,
 *	down the MII, disable Tx/Rx, mask and clear all interrupts,
 *	free queued Tx mbufs (with the PCH_SPT descriptor-ring flush
 *	workaround), and optionally drain the Rx rings.
 */
static void
wm_stop_locked(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i, qidx;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(WM_CORE_LOCKED(sc));

	sc->sc_stopping = true;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
#if 0
		/* Should we clear PHY's status properly? */
		wm_reset(sc);
#endif
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);
	sc->sc_rctl &= ~RCTL_EN;

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
	 * service any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;
	if (sc->sc_nintrs > 1) {
		if (sc->sc_type != WM_T_82574) {
			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
			CSR_WRITE(sc, WMREG_EIAC, 0);
		} else
			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
	}

	/* Release any queued transmit buffers. */
	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
		struct wm_queue *wmq = &sc->sc_queue[qidx];
		struct wm_txqueue *txq = &wmq->wmq_txq;
		mutex_enter(txq->txq_lock);
		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
			txs = &txq->txq_soft[i];
			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		/*
		 * PCH_SPT workaround: if the hardware still requests a
		 * descriptor-ring flush, push one dummy descriptor
		 * through the Tx ring before the ring is torn down.
		 */
		if (sc->sc_type == WM_T_PCH_SPT) {
			pcireg_t preg;
			uint32_t reg;
			int nexttx;

			/* First, disable MULR fix in FEXTNVM11 */
			reg = CSR_READ(sc, WMREG_FEXTNVM11);
			reg |= FEXTNVM11_DIS_MULRFIX;
			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);

			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
			    WM_PCI_DESCRING_STATUS);
			reg = CSR_READ(sc, WMREG_TDLEN(0));
			printf("XXX RST: FLUSH = %08x, len = %u\n",
			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
			    && (reg != 0)) {
				/* TX */
				printf("XXX need TX flush (reg = %08x)\n",
				    preg);
				wm_init_tx_descs(sc, txq);
				wm_init_tx_regs(sc, wmq, txq);
				nexttx = txq->txq_next;
				wm_set_dma_addr(
					&txq->txq_descs[nexttx].wtx_addr,
					WM_CDTXADDR(txq, nexttx));
				txq->txq_descs[nexttx].wtx_cmdlen
				    = htole32(WTX_CMD_IFCS | 512);
				wm_cdtxsync(txq, nexttx, 1,
				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
				CSR_WRITE_FLUSH(sc);
				delay(250);
				CSR_WRITE(sc, WMREG_TCTL, 0);
			}
			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
			    WM_PCI_DESCRING_STATUS);
			if (preg & DESCRING_STATUS_FLUSH_REQ) {
				/* RX */
				printf("XXX need RX flush\n");
			}
		}
		mutex_exit(txq->txq_lock);
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable) {
		/* Free the receive buffers as well. */
		for (i = 0; i < sc->sc_nqueues; i++) {
			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
			mutex_enter(rxq->rxq_lock);
			wm_rxdrain(rxq);
			mutex_exit(rxq->rxq_lock);
		}
	}

#if 0 /* notyet */
	if (sc->sc_type >= WM_T_82544)
		CSR_WRITE(sc, WMREG_WUC, 0);
#endif
}
   5163 
   5164 static void
   5165 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5166 {
   5167 	struct mbuf *m;
   5168 	int i;
   5169 
   5170 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5171 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5172 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5173 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5174 		    m->m_data, m->m_len, m->m_flags);
   5175 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5176 	    i, i == 1 ? "" : "s");
   5177 }
   5178 
   5179 /*
   5180  * wm_82547_txfifo_stall:
   5181  *
   5182  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5183  *	reset the FIFO pointers, and restart packet transmission.
   5184  */
   5185 static void
   5186 wm_82547_txfifo_stall(void *arg)
   5187 {
   5188 	struct wm_softc *sc = arg;
   5189 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5190 
   5191 	mutex_enter(txq->txq_lock);
   5192 
   5193 	if (sc->sc_stopping)
   5194 		goto out;
   5195 
   5196 	if (txq->txq_fifo_stall) {
   5197 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5198 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5199 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5200 			/*
   5201 			 * Packets have drained.  Stop transmitter, reset
   5202 			 * FIFO pointers, restart transmitter, and kick
   5203 			 * the packet queue.
   5204 			 */
   5205 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5206 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5207 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5208 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5209 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5210 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5211 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5212 			CSR_WRITE_FLUSH(sc);
   5213 
   5214 			txq->txq_fifo_head = 0;
   5215 			txq->txq_fifo_stall = 0;
   5216 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5217 		} else {
   5218 			/*
   5219 			 * Still waiting for packets to drain; try again in
   5220 			 * another tick.
   5221 			 */
   5222 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5223 		}
   5224 	}
   5225 
   5226 out:
   5227 	mutex_exit(txq->txq_lock);
   5228 }
   5229 
   5230 /*
   5231  * wm_82547_txfifo_bugchk:
   5232  *
   5233  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5234  *	prevent enqueueing a packet that would wrap around the end
   5235  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   5236  *
   5237  *	We do this by checking the amount of space before the end
   5238  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5239  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5240  *	the internal FIFO pointers to the beginning, and restart
   5241  *	transmission on the interface.
   5242  */
   5243 #define	WM_FIFO_HDR		0x10
   5244 #define	WM_82547_PAD_LEN	0x3e0
   5245 static int
   5246 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5247 {
   5248 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5249 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5250 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5251 
   5252 	/* Just return if already stalled. */
   5253 	if (txq->txq_fifo_stall)
   5254 		return 1;
   5255 
   5256 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5257 		/* Stall only occurs in half-duplex mode. */
   5258 		goto send_packet;
   5259 	}
   5260 
   5261 	if (len >= WM_82547_PAD_LEN + space) {
   5262 		txq->txq_fifo_stall = 1;
   5263 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5264 		return 1;
   5265 	}
   5266 
   5267  send_packet:
   5268 	txq->txq_fifo_head += len;
   5269 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5270 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5271 
   5272 	return 0;
   5273 }
   5274 
/*
 * wm_alloc_tx_descs:
 *
 *	Allocate, map, and DMA-load the transmit descriptor ring for
 *	one queue.  Returns 0 on success or a bus_dma error; on failure
 *	everything acquired so far is released.
 */
static int
wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
{
	int error;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	/* Older chips have a smaller ring; newer ones use descriptors
	 * of a different size when WM_F_NEWQUEUE is set. */
	if (sc->sc_type < WM_T_82544)
		WM_NTXDESC(txq) = WM_NTXDESC_82542;
	else
		WM_NTXDESC(txq) = WM_NTXDESC_82544;
	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		txq->txq_descsize = sizeof(nq_txdesc_t);
	else
		txq->txq_descsize = sizeof(wiseman_txdesc_t);

	/* 0x100000000ULL boundary keeps the ring inside one 4G segment. */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
		    1, &txq->txq_desc_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate TX control data, error = %d\n",
		    error);
		goto fail_0;
	}

	/* Map the ring into kernel virtual address space (coherent). */
	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map TX control data, error = %d\n", error);
		goto fail_1;
	}

	/* One map, one segment covering the whole ring. */
	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create TX control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	/* Obtain the bus address the chip will use for the ring. */
	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load TX control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	return 0;

	/* Unwind in reverse order of acquisition. */
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
	    WM_TXDESCS_SIZE(txq));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
 fail_0:
	return error;
}
   5342 
/*
 * wm_free_tx_descs:
 *
 *	Undo wm_alloc_tx_descs() in exact reverse order: unload and
 *	destroy the DMA map, then unmap and free the descriptor memory.
 */
static void
wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
{

	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
	    WM_TXDESCS_SIZE(txq));
	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
}
   5353 
/*
 * wm_alloc_rx_descs:
 *
 *	Allocate, map, and DMA-load the receive descriptor ring for
 *	one queue.  Mirrors wm_alloc_tx_descs(); returns 0 or a
 *	bus_dma error, releasing partial state on failure.
 */
static int
wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
{
	int error;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	/* RX always uses legacy-format descriptors here. */
	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
	/* 0x100000000ULL boundary keeps the ring inside one 4G segment. */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate RX control data, error = %d\n",
		    error);
		goto fail_0;
	}

	/* Map the ring into kernel virtual address space (coherent). */
	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map RX control data, error = %d\n", error);
		goto fail_1;
	}

	/* One map, one segment covering the whole ring. */
	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create RX control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	/* Obtain the bus address the chip will use for the ring. */
	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load RX control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	return 0;

	/* Unwind in reverse order of acquisition. */
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
	    rxq->rxq_desc_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
 fail_0:
	return error;
}
   5413 
/*
 * wm_free_rx_descs:
 *
 *	Undo wm_alloc_rx_descs() in exact reverse order: unload and
 *	destroy the DMA map, then unmap and free the descriptor memory.
 */
static void
wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
{

	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
	    rxq->rxq_desc_size);
	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
}
   5424 
   5425 
   5426 static int
   5427 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5428 {
   5429 	int i, error;
   5430 
   5431 	/* Create the transmit buffer DMA maps. */
   5432 	WM_TXQUEUELEN(txq) =
   5433 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5434 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5435 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5436 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5437 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5438 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5439 			aprint_error_dev(sc->sc_dev,
   5440 			    "unable to create Tx DMA map %d, error = %d\n",
   5441 			    i, error);
   5442 			goto fail;
   5443 		}
   5444 	}
   5445 
   5446 	return 0;
   5447 
   5448  fail:
   5449 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5450 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5451 			bus_dmamap_destroy(sc->sc_dmat,
   5452 			    txq->txq_soft[i].txs_dmamap);
   5453 	}
   5454 	return error;
   5455 }
   5456 
   5457 static void
   5458 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5459 {
   5460 	int i;
   5461 
   5462 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5463 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5464 			bus_dmamap_destroy(sc->sc_dmat,
   5465 			    txq->txq_soft[i].txs_dmamap);
   5466 	}
   5467 }
   5468 
   5469 static int
   5470 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5471 {
   5472 	int i, error;
   5473 
   5474 	/* Create the receive buffer DMA maps. */
   5475 	for (i = 0; i < WM_NRXDESC; i++) {
   5476 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5477 			    MCLBYTES, 0, 0,
   5478 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5479 			aprint_error_dev(sc->sc_dev,
   5480 			    "unable to create Rx DMA map %d error = %d\n",
   5481 			    i, error);
   5482 			goto fail;
   5483 		}
   5484 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5485 	}
   5486 
   5487 	return 0;
   5488 
   5489  fail:
   5490 	for (i = 0; i < WM_NRXDESC; i++) {
   5491 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5492 			bus_dmamap_destroy(sc->sc_dmat,
   5493 			    rxq->rxq_soft[i].rxs_dmamap);
   5494 	}
   5495 	return error;
   5496 }
   5497 
   5498 static void
   5499 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5500 {
   5501 	int i;
   5502 
   5503 	for (i = 0; i < WM_NRXDESC; i++) {
   5504 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5505 			bus_dmamap_destroy(sc->sc_dmat,
   5506 			    rxq->rxq_soft[i].rxs_dmamap);
   5507 	}
   5508 }
   5509 
   5510 /*
 * wm_alloc_txrx_queues:
   5512  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5513  */
   5514 static int
   5515 wm_alloc_txrx_queues(struct wm_softc *sc)
   5516 {
   5517 	int i, error, tx_done, rx_done;
   5518 
   5519 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5520 	    KM_SLEEP);
   5521 	if (sc->sc_queue == NULL) {
   5522 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   5523 		error = ENOMEM;
   5524 		goto fail_0;
   5525 	}
   5526 
   5527 	/*
   5528 	 * For transmission
   5529 	 */
   5530 	error = 0;
   5531 	tx_done = 0;
   5532 	for (i = 0; i < sc->sc_nqueues; i++) {
   5533 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5534 		txq->txq_sc = sc;
   5535 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5536 
   5537 		error = wm_alloc_tx_descs(sc, txq);
   5538 		if (error)
   5539 			break;
   5540 		error = wm_alloc_tx_buffer(sc, txq);
   5541 		if (error) {
   5542 			wm_free_tx_descs(sc, txq);
   5543 			break;
   5544 		}
   5545 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5546 		if (txq->txq_interq == NULL) {
   5547 			wm_free_tx_descs(sc, txq);
   5548 			wm_free_tx_buffer(sc, txq);
   5549 			error = ENOMEM;
   5550 			break;
   5551 		}
   5552 		tx_done++;
   5553 	}
   5554 	if (error)
   5555 		goto fail_1;
   5556 
   5557 	/*
   5558 	 * For recieve
   5559 	 */
   5560 	error = 0;
   5561 	rx_done = 0;
   5562 	for (i = 0; i < sc->sc_nqueues; i++) {
   5563 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5564 		rxq->rxq_sc = sc;
   5565 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5566 
   5567 		error = wm_alloc_rx_descs(sc, rxq);
   5568 		if (error)
   5569 			break;
   5570 
   5571 		error = wm_alloc_rx_buffer(sc, rxq);
   5572 		if (error) {
   5573 			wm_free_rx_descs(sc, rxq);
   5574 			break;
   5575 		}
   5576 
   5577 		rx_done++;
   5578 	}
   5579 	if (error)
   5580 		goto fail_2;
   5581 
   5582 	return 0;
   5583 
   5584  fail_2:
   5585 	for (i = 0; i < rx_done; i++) {
   5586 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5587 		wm_free_rx_buffer(sc, rxq);
   5588 		wm_free_rx_descs(sc, rxq);
   5589 		if (rxq->rxq_lock)
   5590 			mutex_obj_free(rxq->rxq_lock);
   5591 	}
   5592  fail_1:
   5593 	for (i = 0; i < tx_done; i++) {
   5594 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5595 		pcq_destroy(txq->txq_interq);
   5596 		wm_free_tx_buffer(sc, txq);
   5597 		wm_free_tx_descs(sc, txq);
   5598 		if (txq->txq_lock)
   5599 			mutex_obj_free(txq->txq_lock);
   5600 	}
   5601 
   5602 	kmem_free(sc->sc_queue,
   5603 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5604  fail_0:
   5605 	return error;
   5606 }
   5607 
   5608 /*
 * wm_free_txrx_queues:
   5610  *	Free {tx,rx}descs and {tx,rx} buffers
   5611  */
   5612 static void
   5613 wm_free_txrx_queues(struct wm_softc *sc)
   5614 {
   5615 	int i;
   5616 
   5617 	for (i = 0; i < sc->sc_nqueues; i++) {
   5618 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5619 		wm_free_rx_buffer(sc, rxq);
   5620 		wm_free_rx_descs(sc, rxq);
   5621 		if (rxq->rxq_lock)
   5622 			mutex_obj_free(rxq->rxq_lock);
   5623 	}
   5624 
   5625 	for (i = 0; i < sc->sc_nqueues; i++) {
   5626 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5627 		wm_free_tx_buffer(sc, txq);
   5628 		wm_free_tx_descs(sc, txq);
   5629 		if (txq->txq_lock)
   5630 			mutex_obj_free(txq->txq_lock);
   5631 	}
   5632 
   5633 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5634 }
   5635 
/*
 * wm_init_tx_descs:
 *
 *	Reset the (already allocated) transmit descriptor ring to an
 *	empty, all-zero state and make it visible to the device.
 */
static void
wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
{

	KASSERT(mutex_owned(txq->txq_lock));

	/* Initialize the transmit descriptor ring. */
	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
	/* Flush the zeroed ring to memory before the chip reads it. */
	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* All descriptors free; produce starting at slot 0. */
	txq->txq_free = WM_NTXDESC(txq);
	txq->txq_next = 0;
}
   5649 
/*
 * wm_init_tx_regs:
 *
 *	Program one transmit queue's ring registers (base address,
 *	length, head/tail) and interrupt moderation for this chip
 *	generation.
 */
static void
wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
    struct wm_txqueue *txq)
{

	KASSERT(mutex_owned(txq->txq_lock));

	if (sc->sc_type < WM_T_82543) {
		/* i82542: old fixed register layout, single queue. */
		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		/* i82543 and later: per-queue register blocks. */
		int qid = wmq->wmq_id;

		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
		CSR_WRITE(sc, WMREG_TDH(qid), 0);

		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
			/*
			 * Don't write TDT before TCTL.EN is set.
			 * See the document.
			 */
			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
			    | TXDCTL_WTHRESH(0));
		else {
			/* ITR / 4 */
			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
			if (sc->sc_type >= WM_T_82540) {
				/* should be same */
				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
			}

			CSR_WRITE(sc, WMREG_TDT(qid), 0);
			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		}
	}
}
   5694 
   5695 static void
   5696 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5697 {
   5698 	int i;
   5699 
   5700 	KASSERT(mutex_owned(txq->txq_lock));
   5701 
   5702 	/* Initialize the transmit job descriptors. */
   5703 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5704 		txq->txq_soft[i].txs_mbuf = NULL;
   5705 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5706 	txq->txq_snext = 0;
   5707 	txq->txq_sdirty = 0;
   5708 }
   5709 
   5710 static void
   5711 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5712     struct wm_txqueue *txq)
   5713 {
   5714 
   5715 	KASSERT(mutex_owned(txq->txq_lock));
   5716 
   5717 	/*
   5718 	 * Set up some register offsets that are different between
   5719 	 * the i82542 and the i82543 and later chips.
   5720 	 */
   5721 	if (sc->sc_type < WM_T_82543)
   5722 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5723 	else
   5724 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5725 
   5726 	wm_init_tx_descs(sc, txq);
   5727 	wm_init_tx_regs(sc, wmq, txq);
   5728 	wm_init_tx_buffer(sc, txq);
   5729 }
   5730 
   5731 static void
   5732 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5733     struct wm_rxqueue *rxq)
   5734 {
   5735 
   5736 	KASSERT(mutex_owned(rxq->rxq_lock));
   5737 
   5738 	/*
   5739 	 * Initialize the receive descriptor and receive job
   5740 	 * descriptor rings.
   5741 	 */
   5742 	if (sc->sc_type < WM_T_82543) {
   5743 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5744 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5745 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5746 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5747 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5748 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5749 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5750 
   5751 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5752 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5753 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5754 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5755 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5756 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5757 	} else {
   5758 		int qid = wmq->wmq_id;
   5759 
   5760 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5761 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5762 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5763 
   5764 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5765 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   5766 				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
   5767 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5768 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5769 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5770 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5771 			    | RXDCTL_WTHRESH(1));
   5772 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5773 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5774 		} else {
   5775 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5776 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5777 			/* ITR / 4 */
   5778 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   5779 			/* MUST be same */
   5780 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5781 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5782 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5783 		}
   5784 	}
   5785 }
   5786 
/*
 * wm_init_rx_buffer:
 *
 *	Populate the receive ring with mbuf clusters, reusing any
 *	mbufs left over from a previous run.  Returns 0 on success or
 *	ENOMEM (after draining the ring) if a buffer cannot be
 *	allocated.
 */
static int
wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
{
	struct wm_rxsoft *rxs;
	int error, i;

	KASSERT(mutex_owned(rxq->rxq_lock));

	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &rxq->rxq_soft[i];
		if (rxs->rxs_mbuf == NULL) {
			/* Slot empty: allocate and map a fresh cluster. */
			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
				log(LOG_ERR, "%s: unable to allocate or map "
				    "rx buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(rxq);
				return ENOMEM;
			}
		} else {
			/* Slot still holds an mbuf; just reset the
			 * hardware descriptor on older chips. */
			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
				wm_init_rxdesc(rxq, i);
			/*
			 * For 82575 and newer device, the RX descriptors
			 * must be initialized after the setting of RCTL.EN in
			 * wm_set_filter()
			 */
		}
	}
	/* Restart consumption at slot 0 with no partial packet pending. */
	rxq->rxq_ptr = 0;
	rxq->rxq_discard = 0;
	WM_RXCHAIN_RESET(rxq);

	return 0;
}
   5825 
   5826 static int
   5827 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5828     struct wm_rxqueue *rxq)
   5829 {
   5830 
   5831 	KASSERT(mutex_owned(rxq->rxq_lock));
   5832 
   5833 	/*
   5834 	 * Set up some register offsets that are different between
   5835 	 * the i82542 and the i82543 and later chips.
   5836 	 */
   5837 	if (sc->sc_type < WM_T_82543)
   5838 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5839 	else
   5840 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   5841 
   5842 	wm_init_rx_regs(sc, wmq, rxq);
   5843 	return wm_init_rx_buffer(sc, rxq);
   5844 }
   5845 
   5846 /*
 * wm_init_txrx_queues:
   5848  *	Initialize {tx,rx}descs and {tx,rx} buffers
   5849  */
   5850 static int
   5851 wm_init_txrx_queues(struct wm_softc *sc)
   5852 {
   5853 	int i, error = 0;
   5854 
   5855 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5856 		device_xname(sc->sc_dev), __func__));
   5857 	for (i = 0; i < sc->sc_nqueues; i++) {
   5858 		struct wm_queue *wmq = &sc->sc_queue[i];
   5859 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5860 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5861 
   5862 		mutex_enter(txq->txq_lock);
   5863 		wm_init_tx_queue(sc, wmq, txq);
   5864 		mutex_exit(txq->txq_lock);
   5865 
   5866 		mutex_enter(rxq->rxq_lock);
   5867 		error = wm_init_rx_queue(sc, wmq, rxq);
   5868 		mutex_exit(rxq->rxq_lock);
   5869 		if (error)
   5870 			break;
   5871 	}
   5872 
   5873 	return error;
   5874 }
   5875 
   5876 /*
   5877  * wm_tx_offload:
   5878  *
   5879  *	Set up TCP/IP checksumming parameters for the
   5880  *	specified packet.
   5881  */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/*
	 * Find the start of the IP header.  NB: htons() on the
	 * wire-order ether_type makes the case labels compare equal on
	 * either endianness; ntohs() would be the conventional spelling.
	 */
	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return 0;
	}

	/* IP header length, recovered from the checksum metadata. */
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	/* Offset of the last byte of the IP header (for IPCSE). */
	ipcse = offset + iphl - 1;

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;

	/*
	 * TSO: zero the length fields in the IP/IPv6 header and seed
	 * the TCP checksum with the pseudo-header sum, as the hardware
	 * expects before segmenting.
	 */
	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		int hlen = offset + iphl;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
				    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			/* hlen now covers the IP + TCP headers. */
			hlen += th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			/* hlen now covers the IP + TCP headers. */
			hlen += th->th_off << 2;
		}

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			cmdlen |= WTX_TCPIP_CMD_IP;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			/* No IP checksum end offset for IPv6. */
			ipcse = 0;
		}
		cmd |= WTX_TCPIP_CMD_TSE;
		cmdlen |= WTX_TCPIP_CMD_TSE |
		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
	}

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	ipcs = WTX_TCPIP_IPCSS(offset) |
	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
	    WTX_TCPIP_IPCSE(ipcse);
	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
	}

	/* Advance past the IP header to the start of the L4 header. */
	offset += iphl;

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	/* Fill in the context descriptor. */
	t = (struct livengood_tcpip_ctxdesc *)
	    &txq->txq_descs[txq->txq_next];
	t->tcpip_ipcs = htole32(ipcs);
	t->tcpip_tucs = htole32(tucs);
	t->tcpip_cmdlen = htole32(cmdlen);
	t->tcpip_seg = htole32(seg);
	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);

	/* The context descriptor consumes one ring slot. */
	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
	txs->txs_ndesc++;

	*cmdp = cmd;
	*fieldsp = fields;

	return 0;
}
   6071 
   6072 /*
   6073  * wm_start:		[ifnet interface function]
   6074  *
   6075  *	Start packet transmission on the interface.
   6076  */
   6077 static void
   6078 wm_start(struct ifnet *ifp)
   6079 {
   6080 	struct wm_softc *sc = ifp->if_softc;
   6081 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6082 
   6083 	mutex_enter(txq->txq_lock);
   6084 	if (!sc->sc_stopping)
   6085 		wm_start_locked(ifp);
   6086 	mutex_exit(txq->txq_lock);
   6087 }
   6088 
   6089 static void
   6090 wm_start_locked(struct ifnet *ifp)
   6091 {
   6092 	struct wm_softc *sc = ifp->if_softc;
   6093 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6094 	struct mbuf *m0;
   6095 	struct m_tag *mtag;
   6096 	struct wm_txsoft *txs;
   6097 	bus_dmamap_t dmamap;
   6098 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6099 	bus_addr_t curaddr;
   6100 	bus_size_t seglen, curlen;
   6101 	uint32_t cksumcmd;
   6102 	uint8_t cksumfields;
   6103 
   6104 	KASSERT(mutex_owned(txq->txq_lock));
   6105 
   6106 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6107 		return;
   6108 
   6109 	/* Remember the previous number of free descriptors. */
   6110 	ofree = txq->txq_free;
   6111 
   6112 	/*
   6113 	 * Loop through the send queue, setting up transmit descriptors
   6114 	 * until we drain the queue, or use up all available transmit
   6115 	 * descriptors.
   6116 	 */
   6117 	for (;;) {
   6118 		m0 = NULL;
   6119 
   6120 		/* Get a work queue entry. */
   6121 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6122 			wm_txeof(sc, txq);
   6123 			if (txq->txq_sfree == 0) {
   6124 				DPRINTF(WM_DEBUG_TX,
   6125 				    ("%s: TX: no free job descriptors\n",
   6126 					device_xname(sc->sc_dev)));
   6127 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6128 				break;
   6129 			}
   6130 		}
   6131 
   6132 		/* Grab a packet off the queue. */
   6133 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6134 		if (m0 == NULL)
   6135 			break;
   6136 
   6137 		DPRINTF(WM_DEBUG_TX,
   6138 		    ("%s: TX: have packet to transmit: %p\n",
   6139 		    device_xname(sc->sc_dev), m0));
   6140 
   6141 		txs = &txq->txq_soft[txq->txq_snext];
   6142 		dmamap = txs->txs_dmamap;
   6143 
   6144 		use_tso = (m0->m_pkthdr.csum_flags &
   6145 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6146 
   6147 		/*
   6148 		 * So says the Linux driver:
   6149 		 * The controller does a simple calculation to make sure
   6150 		 * there is enough room in the FIFO before initiating the
   6151 		 * DMA for each buffer.  The calc is:
   6152 		 *	4 = ceil(buffer len / MSS)
   6153 		 * To make sure we don't overrun the FIFO, adjust the max
   6154 		 * buffer len if the MSS drops.
   6155 		 */
   6156 		dmamap->dm_maxsegsz =
   6157 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6158 		    ? m0->m_pkthdr.segsz << 2
   6159 		    : WTX_MAX_LEN;
   6160 
   6161 		/*
   6162 		 * Load the DMA map.  If this fails, the packet either
   6163 		 * didn't fit in the allotted number of segments, or we
   6164 		 * were short on resources.  For the too-many-segments
   6165 		 * case, we simply report an error and drop the packet,
   6166 		 * since we can't sanely copy a jumbo packet to a single
   6167 		 * buffer.
   6168 		 */
   6169 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6170 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6171 		if (error) {
   6172 			if (error == EFBIG) {
   6173 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6174 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6175 				    "DMA segments, dropping...\n",
   6176 				    device_xname(sc->sc_dev));
   6177 				wm_dump_mbuf_chain(sc, m0);
   6178 				m_freem(m0);
   6179 				continue;
   6180 			}
   6181 			/*  Short on resources, just stop for now. */
   6182 			DPRINTF(WM_DEBUG_TX,
   6183 			    ("%s: TX: dmamap load failed: %d\n",
   6184 			    device_xname(sc->sc_dev), error));
   6185 			break;
   6186 		}
   6187 
   6188 		segs_needed = dmamap->dm_nsegs;
   6189 		if (use_tso) {
   6190 			/* For sentinel descriptor; see below. */
   6191 			segs_needed++;
   6192 		}
   6193 
   6194 		/*
   6195 		 * Ensure we have enough descriptors free to describe
   6196 		 * the packet.  Note, we always reserve one descriptor
   6197 		 * at the end of the ring due to the semantics of the
   6198 		 * TDT register, plus one more in the event we need
   6199 		 * to load offload context.
   6200 		 */
   6201 		if (segs_needed > txq->txq_free - 2) {
   6202 			/*
   6203 			 * Not enough free descriptors to transmit this
   6204 			 * packet.  We haven't committed anything yet,
   6205 			 * so just unload the DMA map, put the packet
   6206 			 * pack on the queue, and punt.  Notify the upper
   6207 			 * layer that there are no more slots left.
   6208 			 */
   6209 			DPRINTF(WM_DEBUG_TX,
   6210 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6211 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6212 			    segs_needed, txq->txq_free - 1));
   6213 			ifp->if_flags |= IFF_OACTIVE;
   6214 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6215 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6216 			break;
   6217 		}
   6218 
   6219 		/*
   6220 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6221 		 * once we know we can transmit the packet, since we
   6222 		 * do some internal FIFO space accounting here.
   6223 		 */
   6224 		if (sc->sc_type == WM_T_82547 &&
   6225 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6226 			DPRINTF(WM_DEBUG_TX,
   6227 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6228 			    device_xname(sc->sc_dev)));
   6229 			ifp->if_flags |= IFF_OACTIVE;
   6230 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6231 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   6232 			break;
   6233 		}
   6234 
   6235 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6236 
   6237 		DPRINTF(WM_DEBUG_TX,
   6238 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6239 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6240 
   6241 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6242 
   6243 		/*
   6244 		 * Store a pointer to the packet so that we can free it
   6245 		 * later.
   6246 		 *
   6247 		 * Initially, we consider the number of descriptors the
   6248 		 * packet uses the number of DMA segments.  This may be
   6249 		 * incremented by 1 if we do checksum offload (a descriptor
   6250 		 * is used to set the checksum context).
   6251 		 */
   6252 		txs->txs_mbuf = m0;
   6253 		txs->txs_firstdesc = txq->txq_next;
   6254 		txs->txs_ndesc = segs_needed;
   6255 
   6256 		/* Set up offload parameters for this packet. */
   6257 		if (m0->m_pkthdr.csum_flags &
   6258 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6259 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6260 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6261 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6262 					  &cksumfields) != 0) {
   6263 				/* Error message already displayed. */
   6264 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6265 				continue;
   6266 			}
   6267 		} else {
   6268 			cksumcmd = 0;
   6269 			cksumfields = 0;
   6270 		}
   6271 
   6272 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6273 
   6274 		/* Sync the DMA map. */
   6275 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6276 		    BUS_DMASYNC_PREWRITE);
   6277 
   6278 		/* Initialize the transmit descriptor. */
   6279 		for (nexttx = txq->txq_next, seg = 0;
   6280 		     seg < dmamap->dm_nsegs; seg++) {
   6281 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6282 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6283 			     seglen != 0;
   6284 			     curaddr += curlen, seglen -= curlen,
   6285 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6286 				curlen = seglen;
   6287 
   6288 				/*
   6289 				 * So says the Linux driver:
   6290 				 * Work around for premature descriptor
   6291 				 * write-backs in TSO mode.  Append a
   6292 				 * 4-byte sentinel descriptor.
   6293 				 */
   6294 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6295 				    curlen > 8)
   6296 					curlen -= 4;
   6297 
   6298 				wm_set_dma_addr(
   6299 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6300 				txq->txq_descs[nexttx].wtx_cmdlen
   6301 				    = htole32(cksumcmd | curlen);
   6302 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6303 				    = 0;
   6304 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6305 				    = cksumfields;
   6306 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6307 				lasttx = nexttx;
   6308 
   6309 				DPRINTF(WM_DEBUG_TX,
   6310 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6311 				     "len %#04zx\n",
   6312 				    device_xname(sc->sc_dev), nexttx,
   6313 				    (uint64_t)curaddr, curlen));
   6314 			}
   6315 		}
   6316 
   6317 		KASSERT(lasttx != -1);
   6318 
   6319 		/*
   6320 		 * Set up the command byte on the last descriptor of
   6321 		 * the packet.  If we're in the interrupt delay window,
   6322 		 * delay the interrupt.
   6323 		 */
   6324 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6325 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6326 
   6327 		/*
   6328 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6329 		 * up the descriptor to encapsulate the packet for us.
   6330 		 *
   6331 		 * This is only valid on the last descriptor of the packet.
   6332 		 */
   6333 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6334 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6335 			    htole32(WTX_CMD_VLE);
   6336 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6337 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6338 		}
   6339 
   6340 		txs->txs_lastdesc = lasttx;
   6341 
   6342 		DPRINTF(WM_DEBUG_TX,
   6343 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6344 		    device_xname(sc->sc_dev),
   6345 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6346 
   6347 		/* Sync the descriptors we're using. */
   6348 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6349 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6350 
   6351 		/* Give the packet to the chip. */
   6352 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6353 
   6354 		DPRINTF(WM_DEBUG_TX,
   6355 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6356 
   6357 		DPRINTF(WM_DEBUG_TX,
   6358 		    ("%s: TX: finished transmitting packet, job %d\n",
   6359 		    device_xname(sc->sc_dev), txq->txq_snext));
   6360 
   6361 		/* Advance the tx pointer. */
   6362 		txq->txq_free -= txs->txs_ndesc;
   6363 		txq->txq_next = nexttx;
   6364 
   6365 		txq->txq_sfree--;
   6366 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6367 
   6368 		/* Pass the packet to any BPF listeners. */
   6369 		bpf_mtap(ifp, m0);
   6370 	}
   6371 
   6372 	if (m0 != NULL) {
   6373 		ifp->if_flags |= IFF_OACTIVE;
   6374 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6375 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6376 			__func__));
   6377 		m_freem(m0);
   6378 	}
   6379 
   6380 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6381 		/* No more slots; notify upper layer. */
   6382 		ifp->if_flags |= IFF_OACTIVE;
   6383 	}
   6384 
   6385 	if (txq->txq_free != ofree) {
   6386 		/* Set a watchdog timer in case the chip flakes out. */
   6387 		ifp->if_timer = 5;
   6388 	}
   6389 }
   6390 
   6391 /*
   6392  * wm_nq_tx_offload:
   6393  *
   6394  *	Set up TCP/IP checksumming parameters for the
   6395  *	specified packet, for NEWQUEUE devices
   6396  */
   6397 static int
   6398 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6399     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6400 {
   6401 	struct mbuf *m0 = txs->txs_mbuf;
   6402 	struct m_tag *mtag;
   6403 	uint32_t vl_len, mssidx, cmdc;
   6404 	struct ether_header *eh;
   6405 	int offset, iphl;
   6406 
   6407 	/*
   6408 	 * XXX It would be nice if the mbuf pkthdr had offset
   6409 	 * fields for the protocol headers.
   6410 	 */
   6411 	*cmdlenp = 0;
   6412 	*fieldsp = 0;
   6413 
   6414 	eh = mtod(m0, struct ether_header *);
   6415 	switch (htons(eh->ether_type)) {
   6416 	case ETHERTYPE_IP:
   6417 	case ETHERTYPE_IPV6:
   6418 		offset = ETHER_HDR_LEN;
   6419 		break;
   6420 
   6421 	case ETHERTYPE_VLAN:
   6422 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6423 		break;
   6424 
   6425 	default:
   6426 		/* Don't support this protocol or encapsulation. */
   6427 		*do_csum = false;
   6428 		return 0;
   6429 	}
   6430 	*do_csum = true;
   6431 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6432 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6433 
   6434 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6435 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6436 
   6437 	if ((m0->m_pkthdr.csum_flags &
   6438 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6439 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6440 	} else {
   6441 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6442 	}
   6443 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6444 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6445 
   6446 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6447 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6448 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6449 		*cmdlenp |= NQTX_CMD_VLE;
   6450 	}
   6451 
   6452 	mssidx = 0;
   6453 
   6454 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6455 		int hlen = offset + iphl;
   6456 		int tcp_hlen;
   6457 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6458 
   6459 		if (__predict_false(m0->m_len <
   6460 				    (hlen + sizeof(struct tcphdr)))) {
   6461 			/*
   6462 			 * TCP/IP headers are not in the first mbuf; we need
   6463 			 * to do this the slow and painful way.  Let's just
   6464 			 * hope this doesn't happen very often.
   6465 			 */
   6466 			struct tcphdr th;
   6467 
   6468 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6469 
   6470 			m_copydata(m0, hlen, sizeof(th), &th);
   6471 			if (v4) {
   6472 				struct ip ip;
   6473 
   6474 				m_copydata(m0, offset, sizeof(ip), &ip);
   6475 				ip.ip_len = 0;
   6476 				m_copyback(m0,
   6477 				    offset + offsetof(struct ip, ip_len),
   6478 				    sizeof(ip.ip_len), &ip.ip_len);
   6479 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6480 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6481 			} else {
   6482 				struct ip6_hdr ip6;
   6483 
   6484 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6485 				ip6.ip6_plen = 0;
   6486 				m_copyback(m0,
   6487 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6488 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6489 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6490 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6491 			}
   6492 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6493 			    sizeof(th.th_sum), &th.th_sum);
   6494 
   6495 			tcp_hlen = th.th_off << 2;
   6496 		} else {
   6497 			/*
   6498 			 * TCP/IP headers are in the first mbuf; we can do
   6499 			 * this the easy way.
   6500 			 */
   6501 			struct tcphdr *th;
   6502 
   6503 			if (v4) {
   6504 				struct ip *ip =
   6505 				    (void *)(mtod(m0, char *) + offset);
   6506 				th = (void *)(mtod(m0, char *) + hlen);
   6507 
   6508 				ip->ip_len = 0;
   6509 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6510 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6511 			} else {
   6512 				struct ip6_hdr *ip6 =
   6513 				    (void *)(mtod(m0, char *) + offset);
   6514 				th = (void *)(mtod(m0, char *) + hlen);
   6515 
   6516 				ip6->ip6_plen = 0;
   6517 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6518 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6519 			}
   6520 			tcp_hlen = th->th_off << 2;
   6521 		}
   6522 		hlen += tcp_hlen;
   6523 		*cmdlenp |= NQTX_CMD_TSE;
   6524 
   6525 		if (v4) {
   6526 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6527 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6528 		} else {
   6529 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6530 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6531 		}
   6532 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6533 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6534 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6535 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6536 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6537 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6538 	} else {
   6539 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6540 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6541 	}
   6542 
   6543 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6544 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6545 		cmdc |= NQTXC_CMD_IP4;
   6546 	}
   6547 
   6548 	if (m0->m_pkthdr.csum_flags &
   6549 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6550 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6551 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6552 			cmdc |= NQTXC_CMD_TCP;
   6553 		} else {
   6554 			cmdc |= NQTXC_CMD_UDP;
   6555 		}
   6556 		cmdc |= NQTXC_CMD_IP4;
   6557 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6558 	}
   6559 	if (m0->m_pkthdr.csum_flags &
   6560 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6561 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6562 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6563 			cmdc |= NQTXC_CMD_TCP;
   6564 		} else {
   6565 			cmdc |= NQTXC_CMD_UDP;
   6566 		}
   6567 		cmdc |= NQTXC_CMD_IP6;
   6568 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6569 	}
   6570 
   6571 	/* Fill in the context descriptor. */
   6572 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6573 	    htole32(vl_len);
   6574 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6575 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6576 	    htole32(cmdc);
   6577 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6578 	    htole32(mssidx);
   6579 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6580 	DPRINTF(WM_DEBUG_TX,
   6581 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6582 	    txq->txq_next, 0, vl_len));
   6583 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6584 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6585 	txs->txs_ndesc++;
   6586 	return 0;
   6587 }
   6588 
   6589 /*
   6590  * wm_nq_start:		[ifnet interface function]
   6591  *
   6592  *	Start packet transmission on the interface for NEWQUEUE devices
   6593  */
   6594 static void
   6595 wm_nq_start(struct ifnet *ifp)
   6596 {
   6597 	struct wm_softc *sc = ifp->if_softc;
   6598 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6599 
   6600 	mutex_enter(txq->txq_lock);
   6601 	if (!sc->sc_stopping)
   6602 		wm_nq_start_locked(ifp);
   6603 	mutex_exit(txq->txq_lock);
   6604 }
   6605 
   6606 static void
   6607 wm_nq_start_locked(struct ifnet *ifp)
   6608 {
   6609 	struct wm_softc *sc = ifp->if_softc;
   6610 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6611 
   6612 	wm_nq_send_common_locked(ifp, txq, false);
   6613 }
   6614 
   6615 static inline int
   6616 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6617 {
   6618 	struct wm_softc *sc = ifp->if_softc;
   6619 	u_int cpuid = cpu_index(curcpu());
   6620 
   6621 	/*
   6622 	 * Currently, simple distribute strategy.
   6623 	 * TODO:
   6624 	 * destribute by flowid(RSS has value).
   6625 	 */
   6626 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6627 }
   6628 
   6629 static int
   6630 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6631 {
   6632 	int qid;
   6633 	struct wm_softc *sc = ifp->if_softc;
   6634 	struct wm_txqueue *txq;
   6635 
   6636 	qid = wm_nq_select_txqueue(ifp, m);
   6637 	txq = &sc->sc_queue[qid].wmq_txq;
   6638 
   6639 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6640 		m_freem(m);
   6641 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6642 		return ENOBUFS;
   6643 	}
   6644 
   6645 	if (mutex_tryenter(txq->txq_lock)) {
   6646 		/* XXXX should be per TX queue */
   6647 		ifp->if_obytes += m->m_pkthdr.len;
   6648 		if (m->m_flags & M_MCAST)
   6649 			ifp->if_omcasts++;
   6650 
   6651 		if (!sc->sc_stopping)
   6652 			wm_nq_transmit_locked(ifp, txq);
   6653 		mutex_exit(txq->txq_lock);
   6654 	}
   6655 
   6656 	return 0;
   6657 }
   6658 
/*
 * wm_nq_transmit_locked:
 *
 *	Locked-entry variant for the wm_nq_transmit() path; drains the
 *	per-queue pcq (is_transmit == true).
 *	Called with txq->txq_lock held.
 */
static void
wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
{

	wm_nq_send_common_locked(ifp, txq, true);
}
   6665 
/*
 * wm_nq_send_common_locked:
 *
 *	Common transmit path for NEWQUEUE devices: pull packets off the
 *	source queue, map them for DMA, build descriptors, and hand them
 *	to the chip.  When is_transmit is true, packets come from the
 *	per-queue pcq (wm_nq_transmit() path); otherwise from the shared
 *	ifnet send queue (wm_nq_start() path).
 *
 *	Called with txq->txq_lock held.
 */
static void
wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
    bool is_transmit)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, seg, segs_needed;
	bool do_csum, sent;

	KASSERT(mutex_owned(txq->txq_lock));

	/* Nothing to do if the interface is not up, or the queue is full. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;
	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
		return;

	sent = false;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		m0 = NULL;

		/* Get a work queue entry (reclaim completed jobs first). */
		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
			wm_txeof(sc, txq);
			if (txq->txq_sfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		/* Grab a packet off the queue. */
		if (is_transmit)
			m0 = pcq_get(txq->txq_interq);
		else
			IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		txs = &txq->txq_soft[txq->txq_snext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > txq->txq_free - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, txq->txq_free - 1));
			txq->txq_flags |= WM_TXQ_NO_SPACE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = txq->txq_next;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		uint32_t cmdlen, fields, dcmdlen;
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
			    &do_csum) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			do_csum = false;
			cmdlen = 0;
			fields = 0;
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the first transmit descriptor: legacy format
		 * when no offload context was loaded, advanced format
		 * otherwise.
		 */
		nexttx = txq->txq_next;
		if (!do_csum) {
			/* setup a legacy descriptor */
			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
			    dmamap->dm_segs[0].ds_addr);
			txq->txq_descs[nexttx].wtx_cmdlen =
			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
			    NULL) {
				txq->txq_descs[nexttx].wtx_cmdlen |=
				    htole32(WTX_CMD_VLE);
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
			}
			dcmdlen = 0;
		} else {
			/* setup an advanced data descriptor */
			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
			    htole64(dmamap->dm_segs[0].ds_addr);
			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
			    htole32(fields);
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
			    device_xname(sc->sc_dev), nexttx,
			    (uint64_t)dmamap->dm_segs[0].ds_addr));
			DPRINTF(WM_DEBUG_TX,
			    ("\t 0x%08x%08x\n", fields,
			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
		}

		lasttx = nexttx;
		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the next descriptors.  Legacy and advanced
		 * format are the same here.
		 */
		for (seg = 1; seg < dmamap->dm_nsegs;
		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
			    htole64(dmamap->dm_segs[seg].ds_addr);
			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: %#" PRIx64 ", "
			     "len %#04zx\n",
			    device_xname(sc->sc_dev), nexttx,
			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
			    dmamap->dm_segs[seg].ds_len));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
		    (NQTX_CMD_EOP | NQTX_CMD_RS));
		txq->txq_descs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
		sent = true;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), txq->txq_snext));

		/* Advance the tx pointer. */
		txq->txq_free -= txs->txs_ndesc;
		txq->txq_next = nexttx;

		txq->txq_sfree--;
		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}

	/* A packet was dequeued but could not be committed; drop it. */
	if (m0 != NULL) {
		txq->txq_flags |= WM_TXQ_NO_SPACE;
		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
			__func__));
		m_freem(m0);
	}

	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
		/* No more slots; notify upper layer. */
		txq->txq_flags |= WM_TXQ_NO_SPACE;
	}

	if (sent) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
   6944 
   6945 /* Interrupt */
   6946 
   6947 /*
   6948  * wm_txeof:
   6949  *
   6950  *	Helper; handle transmit interrupts.
   6951  */
   6952 static int
   6953 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   6954 {
   6955 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6956 	struct wm_txsoft *txs;
   6957 	bool processed = false;
   6958 	int count = 0;
   6959 	int i;
   6960 	uint8_t status;
   6961 
   6962 	KASSERT(mutex_owned(txq->txq_lock));
   6963 
   6964 	if (sc->sc_stopping)
   6965 		return 0;
   6966 
   6967 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6968 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   6969 	else
   6970 		ifp->if_flags &= ~IFF_OACTIVE;
   6971 
   6972 	/*
   6973 	 * Go through the Tx list and free mbufs for those
   6974 	 * frames which have been transmitted.
   6975 	 */
   6976 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   6977 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   6978 		txs = &txq->txq_soft[i];
   6979 
   6980 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   6981 			device_xname(sc->sc_dev), i));
   6982 
   6983 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   6984 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   6985 
   6986 		status =
   6987 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   6988 		if ((status & WTX_ST_DD) == 0) {
   6989 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   6990 			    BUS_DMASYNC_PREREAD);
   6991 			break;
   6992 		}
   6993 
   6994 		processed = true;
   6995 		count++;
   6996 		DPRINTF(WM_DEBUG_TX,
   6997 		    ("%s: TX: job %d done: descs %d..%d\n",
   6998 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   6999 		    txs->txs_lastdesc));
   7000 
   7001 		/*
   7002 		 * XXX We should probably be using the statistics
   7003 		 * XXX registers, but I don't know if they exist
   7004 		 * XXX on chips before the i82544.
   7005 		 */
   7006 
   7007 #ifdef WM_EVENT_COUNTERS
   7008 		if (status & WTX_ST_TU)
   7009 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   7010 #endif /* WM_EVENT_COUNTERS */
   7011 
   7012 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7013 			ifp->if_oerrors++;
   7014 			if (status & WTX_ST_LC)
   7015 				log(LOG_WARNING, "%s: late collision\n",
   7016 				    device_xname(sc->sc_dev));
   7017 			else if (status & WTX_ST_EC) {
   7018 				ifp->if_collisions += 16;
   7019 				log(LOG_WARNING, "%s: excessive collisions\n",
   7020 				    device_xname(sc->sc_dev));
   7021 			}
   7022 		} else
   7023 			ifp->if_opackets++;
   7024 
   7025 		txq->txq_free += txs->txs_ndesc;
   7026 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7027 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7028 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7029 		m_freem(txs->txs_mbuf);
   7030 		txs->txs_mbuf = NULL;
   7031 	}
   7032 
   7033 	/* Update the dirty transmit buffer pointer. */
   7034 	txq->txq_sdirty = i;
   7035 	DPRINTF(WM_DEBUG_TX,
   7036 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7037 
   7038 	if (count != 0)
   7039 		rnd_add_uint32(&sc->rnd_source, count);
   7040 
   7041 	/*
   7042 	 * If there are no more pending transmissions, cancel the watchdog
   7043 	 * timer.
   7044 	 */
   7045 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7046 		ifp->if_timer = 0;
   7047 
   7048 	return processed;
   7049 }
   7050 
   7051 /*
   7052  * wm_rxeof:
   7053  *
   7054  *	Helper; handle receive interrupts.
   7055  */
   7056 static void
   7057 wm_rxeof(struct wm_rxqueue *rxq)
   7058 {
   7059 	struct wm_softc *sc = rxq->rxq_sc;
   7060 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7061 	struct wm_rxsoft *rxs;
   7062 	struct mbuf *m;
   7063 	int i, len;
   7064 	int count = 0;
   7065 	uint8_t status, errors;
   7066 	uint16_t vlantag;
   7067 
   7068 	KASSERT(mutex_owned(rxq->rxq_lock));
   7069 
   7070 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7071 		rxs = &rxq->rxq_soft[i];
   7072 
   7073 		DPRINTF(WM_DEBUG_RX,
   7074 		    ("%s: RX: checking descriptor %d\n",
   7075 		    device_xname(sc->sc_dev), i));
   7076 
   7077 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7078 
   7079 		status = rxq->rxq_descs[i].wrx_status;
   7080 		errors = rxq->rxq_descs[i].wrx_errors;
   7081 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7082 		vlantag = rxq->rxq_descs[i].wrx_special;
   7083 
   7084 		if ((status & WRX_ST_DD) == 0) {
   7085 			/* We have processed all of the receive descriptors. */
   7086 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7087 			break;
   7088 		}
   7089 
   7090 		count++;
   7091 		if (__predict_false(rxq->rxq_discard)) {
   7092 			DPRINTF(WM_DEBUG_RX,
   7093 			    ("%s: RX: discarding contents of descriptor %d\n",
   7094 			    device_xname(sc->sc_dev), i));
   7095 			wm_init_rxdesc(rxq, i);
   7096 			if (status & WRX_ST_EOP) {
   7097 				/* Reset our state. */
   7098 				DPRINTF(WM_DEBUG_RX,
   7099 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7100 				    device_xname(sc->sc_dev)));
   7101 				rxq->rxq_discard = 0;
   7102 			}
   7103 			continue;
   7104 		}
   7105 
   7106 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7107 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7108 
   7109 		m = rxs->rxs_mbuf;
   7110 
   7111 		/*
   7112 		 * Add a new receive buffer to the ring, unless of
   7113 		 * course the length is zero. Treat the latter as a
   7114 		 * failed mapping.
   7115 		 */
   7116 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7117 			/*
   7118 			 * Failed, throw away what we've done so
   7119 			 * far, and discard the rest of the packet.
   7120 			 */
   7121 			ifp->if_ierrors++;
   7122 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7123 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7124 			wm_init_rxdesc(rxq, i);
   7125 			if ((status & WRX_ST_EOP) == 0)
   7126 				rxq->rxq_discard = 1;
   7127 			if (rxq->rxq_head != NULL)
   7128 				m_freem(rxq->rxq_head);
   7129 			WM_RXCHAIN_RESET(rxq);
   7130 			DPRINTF(WM_DEBUG_RX,
   7131 			    ("%s: RX: Rx buffer allocation failed, "
   7132 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7133 			    rxq->rxq_discard ? " (discard)" : ""));
   7134 			continue;
   7135 		}
   7136 
   7137 		m->m_len = len;
   7138 		rxq->rxq_len += len;
   7139 		DPRINTF(WM_DEBUG_RX,
   7140 		    ("%s: RX: buffer at %p len %d\n",
   7141 		    device_xname(sc->sc_dev), m->m_data, len));
   7142 
   7143 		/* If this is not the end of the packet, keep looking. */
   7144 		if ((status & WRX_ST_EOP) == 0) {
   7145 			WM_RXCHAIN_LINK(rxq, m);
   7146 			DPRINTF(WM_DEBUG_RX,
   7147 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7148 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7149 			continue;
   7150 		}
   7151 
   7152 		/*
   7153 		 * Okay, we have the entire packet now.  The chip is
   7154 		 * configured to include the FCS except I350 and I21[01]
   7155 		 * (not all chips can be configured to strip it),
   7156 		 * so we need to trim it.
   7157 		 * May need to adjust length of previous mbuf in the
   7158 		 * chain if the current mbuf is too short.
   7159 		 * For an eratta, the RCTL_SECRC bit in RCTL register
   7160 		 * is always set in I350, so we don't trim it.
   7161 		 */
   7162 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7163 		    && (sc->sc_type != WM_T_I210)
   7164 		    && (sc->sc_type != WM_T_I211)) {
   7165 			if (m->m_len < ETHER_CRC_LEN) {
   7166 				rxq->rxq_tail->m_len
   7167 				    -= (ETHER_CRC_LEN - m->m_len);
   7168 				m->m_len = 0;
   7169 			} else
   7170 				m->m_len -= ETHER_CRC_LEN;
   7171 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7172 		} else
   7173 			len = rxq->rxq_len;
   7174 
   7175 		WM_RXCHAIN_LINK(rxq, m);
   7176 
   7177 		*rxq->rxq_tailp = NULL;
   7178 		m = rxq->rxq_head;
   7179 
   7180 		WM_RXCHAIN_RESET(rxq);
   7181 
   7182 		DPRINTF(WM_DEBUG_RX,
   7183 		    ("%s: RX: have entire packet, len -> %d\n",
   7184 		    device_xname(sc->sc_dev), len));
   7185 
   7186 		/* If an error occurred, update stats and drop the packet. */
   7187 		if (errors &
   7188 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7189 			if (errors & WRX_ER_SE)
   7190 				log(LOG_WARNING, "%s: symbol error\n",
   7191 				    device_xname(sc->sc_dev));
   7192 			else if (errors & WRX_ER_SEQ)
   7193 				log(LOG_WARNING, "%s: receive sequence error\n",
   7194 				    device_xname(sc->sc_dev));
   7195 			else if (errors & WRX_ER_CE)
   7196 				log(LOG_WARNING, "%s: CRC error\n",
   7197 				    device_xname(sc->sc_dev));
   7198 			m_freem(m);
   7199 			continue;
   7200 		}
   7201 
   7202 		/* No errors.  Receive the packet. */
   7203 		m_set_rcvif(m, ifp);
   7204 		m->m_pkthdr.len = len;
   7205 
   7206 		/*
   7207 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7208 		 * for us.  Associate the tag with the packet.
   7209 		 */
   7210 		/* XXXX should check for i350 and i354 */
   7211 		if ((status & WRX_ST_VP) != 0) {
   7212 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7213 		}
   7214 
   7215 		/* Set up checksum info for this packet. */
   7216 		if ((status & WRX_ST_IXSM) == 0) {
   7217 			if (status & WRX_ST_IPCS) {
   7218 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   7219 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7220 				if (errors & WRX_ER_IPE)
   7221 					m->m_pkthdr.csum_flags |=
   7222 					    M_CSUM_IPv4_BAD;
   7223 			}
   7224 			if (status & WRX_ST_TCPCS) {
   7225 				/*
   7226 				 * Note: we don't know if this was TCP or UDP,
   7227 				 * so we just set both bits, and expect the
   7228 				 * upper layers to deal.
   7229 				 */
   7230 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   7231 				m->m_pkthdr.csum_flags |=
   7232 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7233 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7234 				if (errors & WRX_ER_TCPE)
   7235 					m->m_pkthdr.csum_flags |=
   7236 					    M_CSUM_TCP_UDP_BAD;
   7237 			}
   7238 		}
   7239 
   7240 		ifp->if_ipackets++;
   7241 
   7242 		mutex_exit(rxq->rxq_lock);
   7243 
   7244 		/* Pass this up to any BPF listeners. */
   7245 		bpf_mtap(ifp, m);
   7246 
   7247 		/* Pass it on. */
   7248 		if_percpuq_enqueue(sc->sc_ipq, m);
   7249 
   7250 		mutex_enter(rxq->rxq_lock);
   7251 
   7252 		if (sc->sc_stopping)
   7253 			break;
   7254 	}
   7255 
   7256 	/* Update the receive pointer. */
   7257 	rxq->rxq_ptr = i;
   7258 	if (count != 0)
   7259 		rnd_add_uint32(&sc->rnd_source, count);
   7260 
   7261 	DPRINTF(WM_DEBUG_RX,
   7262 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7263 }
   7264 
   7265 /*
   7266  * wm_linkintr_gmii:
   7267  *
   7268  *	Helper; handle link interrupts for GMII.
   7269  */
   7270 static void
   7271 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7272 {
   7273 
   7274 	KASSERT(WM_CORE_LOCKED(sc));
   7275 
   7276 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7277 		__func__));
   7278 
   7279 	if (icr & ICR_LSC) {
   7280 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7281 
   7282 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7283 			wm_gig_downshift_workaround_ich8lan(sc);
   7284 
   7285 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7286 			device_xname(sc->sc_dev)));
   7287 		mii_pollstat(&sc->sc_mii);
   7288 		if (sc->sc_type == WM_T_82543) {
   7289 			int miistatus, active;
   7290 
   7291 			/*
   7292 			 * With 82543, we need to force speed and
   7293 			 * duplex on the MAC equal to what the PHY
   7294 			 * speed and duplex configuration is.
   7295 			 */
   7296 			miistatus = sc->sc_mii.mii_media_status;
   7297 
   7298 			if (miistatus & IFM_ACTIVE) {
   7299 				active = sc->sc_mii.mii_media_active;
   7300 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7301 				switch (IFM_SUBTYPE(active)) {
   7302 				case IFM_10_T:
   7303 					sc->sc_ctrl |= CTRL_SPEED_10;
   7304 					break;
   7305 				case IFM_100_TX:
   7306 					sc->sc_ctrl |= CTRL_SPEED_100;
   7307 					break;
   7308 				case IFM_1000_T:
   7309 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7310 					break;
   7311 				default:
   7312 					/*
   7313 					 * fiber?
   7314 					 * Shoud not enter here.
   7315 					 */
   7316 					printf("unknown media (%x)\n", active);
   7317 					break;
   7318 				}
   7319 				if (active & IFM_FDX)
   7320 					sc->sc_ctrl |= CTRL_FD;
   7321 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7322 			}
   7323 		} else if ((sc->sc_type == WM_T_ICH8)
   7324 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7325 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7326 		} else if (sc->sc_type == WM_T_PCH) {
   7327 			wm_k1_gig_workaround_hv(sc,
   7328 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7329 		}
   7330 
   7331 		if ((sc->sc_phytype == WMPHY_82578)
   7332 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7333 			== IFM_1000_T)) {
   7334 
   7335 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7336 				delay(200*1000); /* XXX too big */
   7337 
   7338 				/* Link stall fix for link up */
   7339 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7340 				    HV_MUX_DATA_CTRL,
   7341 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7342 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7343 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7344 				    HV_MUX_DATA_CTRL,
   7345 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7346 			}
   7347 		}
   7348 	} else if (icr & ICR_RXSEQ) {
   7349 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7350 			device_xname(sc->sc_dev)));
   7351 	}
   7352 }
   7353 
   7354 /*
   7355  * wm_linkintr_tbi:
   7356  *
   7357  *	Helper; handle link interrupts for TBI mode.
   7358  */
   7359 static void
   7360 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7361 {
   7362 	uint32_t status;
   7363 
   7364 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7365 		__func__));
   7366 
   7367 	status = CSR_READ(sc, WMREG_STATUS);
   7368 	if (icr & ICR_LSC) {
   7369 		if (status & STATUS_LU) {
   7370 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7371 			    device_xname(sc->sc_dev),
   7372 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7373 			/*
   7374 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7375 			 * so we should update sc->sc_ctrl
   7376 			 */
   7377 
   7378 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7379 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7380 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7381 			if (status & STATUS_FD)
   7382 				sc->sc_tctl |=
   7383 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7384 			else
   7385 				sc->sc_tctl |=
   7386 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7387 			if (sc->sc_ctrl & CTRL_TFCE)
   7388 				sc->sc_fcrtl |= FCRTL_XONE;
   7389 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7390 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7391 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7392 				      sc->sc_fcrtl);
   7393 			sc->sc_tbi_linkup = 1;
   7394 		} else {
   7395 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7396 			    device_xname(sc->sc_dev)));
   7397 			sc->sc_tbi_linkup = 0;
   7398 		}
   7399 		/* Update LED */
   7400 		wm_tbi_serdes_set_linkled(sc);
   7401 	} else if (icr & ICR_RXSEQ) {
   7402 		DPRINTF(WM_DEBUG_LINK,
   7403 		    ("%s: LINK: Receive sequence error\n",
   7404 		    device_xname(sc->sc_dev)));
   7405 	}
   7406 }
   7407 
   7408 /*
   7409  * wm_linkintr_serdes:
   7410  *
   7411  *	Helper; handle link interrupts for TBI mode.
   7412  */
   7413 static void
   7414 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7415 {
   7416 	struct mii_data *mii = &sc->sc_mii;
   7417 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7418 	uint32_t pcs_adv, pcs_lpab, reg;
   7419 
   7420 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7421 		__func__));
   7422 
   7423 	if (icr & ICR_LSC) {
   7424 		/* Check PCS */
   7425 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7426 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7427 			mii->mii_media_status |= IFM_ACTIVE;
   7428 			sc->sc_tbi_linkup = 1;
   7429 		} else {
   7430 			mii->mii_media_status |= IFM_NONE;
   7431 			sc->sc_tbi_linkup = 0;
   7432 			wm_tbi_serdes_set_linkled(sc);
   7433 			return;
   7434 		}
   7435 		mii->mii_media_active |= IFM_1000_SX;
   7436 		if ((reg & PCS_LSTS_FDX) != 0)
   7437 			mii->mii_media_active |= IFM_FDX;
   7438 		else
   7439 			mii->mii_media_active |= IFM_HDX;
   7440 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7441 			/* Check flow */
   7442 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7443 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7444 				DPRINTF(WM_DEBUG_LINK,
   7445 				    ("XXX LINKOK but not ACOMP\n"));
   7446 				return;
   7447 			}
   7448 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7449 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7450 			DPRINTF(WM_DEBUG_LINK,
   7451 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7452 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7453 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7454 				mii->mii_media_active |= IFM_FLOW
   7455 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7456 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7457 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7458 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7459 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7460 				mii->mii_media_active |= IFM_FLOW
   7461 				    | IFM_ETH_TXPAUSE;
   7462 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7463 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7464 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7465 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7466 				mii->mii_media_active |= IFM_FLOW
   7467 				    | IFM_ETH_RXPAUSE;
   7468 		}
   7469 		/* Update LED */
   7470 		wm_tbi_serdes_set_linkled(sc);
   7471 	} else {
   7472 		DPRINTF(WM_DEBUG_LINK,
   7473 		    ("%s: LINK: Receive sequence error\n",
   7474 		    device_xname(sc->sc_dev)));
   7475 	}
   7476 }
   7477 
   7478 /*
   7479  * wm_linkintr:
   7480  *
   7481  *	Helper; handle link interrupts.
   7482  */
   7483 static void
   7484 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7485 {
   7486 
   7487 	KASSERT(WM_CORE_LOCKED(sc));
   7488 
   7489 	if (sc->sc_flags & WM_F_HAS_MII)
   7490 		wm_linkintr_gmii(sc, icr);
   7491 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7492 	    && (sc->sc_type >= WM_T_82575))
   7493 		wm_linkintr_serdes(sc, icr);
   7494 	else
   7495 		wm_linkintr_tbi(sc, icr);
   7496 }
   7497 
   7498 /*
   7499  * wm_intr_legacy:
   7500  *
   7501  *	Interrupt service routine for INTx and MSI.
   7502  */
   7503 static int
   7504 wm_intr_legacy(void *arg)
   7505 {
   7506 	struct wm_softc *sc = arg;
   7507 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7508 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7509 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7510 	uint32_t icr, rndval = 0;
   7511 	int handled = 0;
   7512 
   7513 	DPRINTF(WM_DEBUG_TX,
   7514 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7515 	while (1 /* CONSTCOND */) {
   7516 		icr = CSR_READ(sc, WMREG_ICR);
   7517 		if ((icr & sc->sc_icr) == 0)
   7518 			break;
   7519 		if (rndval == 0)
   7520 			rndval = icr;
   7521 
   7522 		mutex_enter(rxq->rxq_lock);
   7523 
   7524 		if (sc->sc_stopping) {
   7525 			mutex_exit(rxq->rxq_lock);
   7526 			break;
   7527 		}
   7528 
   7529 		handled = 1;
   7530 
   7531 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7532 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7533 			DPRINTF(WM_DEBUG_RX,
   7534 			    ("%s: RX: got Rx intr 0x%08x\n",
   7535 			    device_xname(sc->sc_dev),
   7536 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7537 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7538 		}
   7539 #endif
   7540 		wm_rxeof(rxq);
   7541 
   7542 		mutex_exit(rxq->rxq_lock);
   7543 		mutex_enter(txq->txq_lock);
   7544 
   7545 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7546 		if (icr & ICR_TXDW) {
   7547 			DPRINTF(WM_DEBUG_TX,
   7548 			    ("%s: TX: got TXDW interrupt\n",
   7549 			    device_xname(sc->sc_dev)));
   7550 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7551 		}
   7552 #endif
   7553 		wm_txeof(sc, txq);
   7554 
   7555 		mutex_exit(txq->txq_lock);
   7556 		WM_CORE_LOCK(sc);
   7557 
   7558 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7559 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7560 			wm_linkintr(sc, icr);
   7561 		}
   7562 
   7563 		WM_CORE_UNLOCK(sc);
   7564 
   7565 		if (icr & ICR_RXO) {
   7566 #if defined(WM_DEBUG)
   7567 			log(LOG_WARNING, "%s: Receive overrun\n",
   7568 			    device_xname(sc->sc_dev));
   7569 #endif /* defined(WM_DEBUG) */
   7570 		}
   7571 	}
   7572 
   7573 	rnd_add_uint32(&sc->rnd_source, rndval);
   7574 
   7575 	if (handled) {
   7576 		/* Try to get more packets going. */
   7577 		ifp->if_start(ifp);
   7578 	}
   7579 
   7580 	return handled;
   7581 }
   7582 
/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for one Tx/Rx queue pair under MSI-X.
 *	Masks this queue's interrupt on entry and unmasks it on exit;
 *	the mask/unmask register layout differs by chip type.
 */
static int
wm_txrxintr_msix(void *arg)
{
	struct wm_queue *wmq = arg;
	struct wm_txqueue *txq = &wmq->wmq_txq;
	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
	struct wm_softc *sc = txq->txq_sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);

	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));

	/* Mask this queue's interrupt while we service it */
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);

	/* NOTE(review): sc_stopping is read without a lock here -- confirm */
	if (!sc->sc_stopping) {
		mutex_enter(txq->txq_lock);

		WM_EVCNT_INCR(&sc->sc_ev_txdw);
		/* Reclaim completed Tx descriptors */
		wm_txeof(sc, txq);

		/* Try to get more packets going. */
		if (pcq_peek(txq->txq_interq) != NULL)
			wm_nq_transmit_locked(ifp, txq);
		/*
		 * There are still some upper layer processing which call
		 * ifp->if_start(). e.g. ALTQ
		 */
		if (wmq->wmq_id == 0) {
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				wm_nq_start_locked(ifp);
		}
		mutex_exit(txq->txq_lock);
	}

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));

	if (!sc->sc_stopping) {
		mutex_enter(rxq->rxq_lock);
		WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		wm_rxeof(rxq);
		mutex_exit(rxq->rxq_lock);
	}

	/* Unmask this queue's interrupt */
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);

	return 1;
}
   7643 
   7644 /*
   7645  * wm_linkintr_msix:
   7646  *
   7647  *	Interrupt service routine for link status change for MSI-X.
   7648  */
   7649 static int
   7650 wm_linkintr_msix(void *arg)
   7651 {
   7652 	struct wm_softc *sc = arg;
   7653 	uint32_t reg;
   7654 
   7655 	DPRINTF(WM_DEBUG_LINK,
   7656 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7657 
   7658 	reg = CSR_READ(sc, WMREG_ICR);
   7659 	WM_CORE_LOCK(sc);
   7660 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7661 		goto out;
   7662 
   7663 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7664 	wm_linkintr(sc, ICR_LSC);
   7665 
   7666 out:
   7667 	WM_CORE_UNLOCK(sc);
   7668 
   7669 	if (sc->sc_type == WM_T_82574)
   7670 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7671 	else if (sc->sc_type == WM_T_82575)
   7672 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7673 	else
   7674 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7675 
   7676 	return 1;
   7677 }
   7678 
   7679 /*
   7680  * Media related.
   7681  * GMII, SGMII, TBI (and SERDES)
   7682  */
   7683 
   7684 /* Common */
   7685 
   7686 /*
   7687  * wm_tbi_serdes_set_linkled:
   7688  *
   7689  *	Update the link LED on TBI and SERDES devices.
   7690  */
   7691 static void
   7692 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7693 {
   7694 
   7695 	if (sc->sc_tbi_linkup)
   7696 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7697 	else
   7698 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7699 
   7700 	/* 82540 or newer devices are active low */
   7701 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7702 
   7703 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7704 }
   7705 
   7706 /* GMII related */
   7707 
   7708 /*
   7709  * wm_gmii_reset:
   7710  *
   7711  *	Reset the PHY.
   7712  */
   7713 static void
   7714 wm_gmii_reset(struct wm_softc *sc)
   7715 {
   7716 	uint32_t reg;
   7717 	int rv;
   7718 
   7719 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7720 		device_xname(sc->sc_dev), __func__));
   7721 	/* get phy semaphore */
   7722 	switch (sc->sc_type) {
   7723 	case WM_T_82571:
   7724 	case WM_T_82572:
   7725 	case WM_T_82573:
   7726 	case WM_T_82574:
   7727 	case WM_T_82583:
   7728 		 /* XXX should get sw semaphore, too */
   7729 		rv = wm_get_swsm_semaphore(sc);
   7730 		break;
   7731 	case WM_T_82575:
   7732 	case WM_T_82576:
   7733 	case WM_T_82580:
   7734 	case WM_T_I350:
   7735 	case WM_T_I354:
   7736 	case WM_T_I210:
   7737 	case WM_T_I211:
   7738 	case WM_T_80003:
   7739 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7740 		break;
   7741 	case WM_T_ICH8:
   7742 	case WM_T_ICH9:
   7743 	case WM_T_ICH10:
   7744 	case WM_T_PCH:
   7745 	case WM_T_PCH2:
   7746 	case WM_T_PCH_LPT:
   7747 	case WM_T_PCH_SPT:
   7748 		rv = wm_get_swfwhw_semaphore(sc);
   7749 		break;
   7750 	default:
   7751 		/* nothing to do*/
   7752 		rv = 0;
   7753 		break;
   7754 	}
   7755 	if (rv != 0) {
   7756 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7757 		    __func__);
   7758 		return;
   7759 	}
   7760 
   7761 	switch (sc->sc_type) {
   7762 	case WM_T_82542_2_0:
   7763 	case WM_T_82542_2_1:
   7764 		/* null */
   7765 		break;
   7766 	case WM_T_82543:
   7767 		/*
   7768 		 * With 82543, we need to force speed and duplex on the MAC
   7769 		 * equal to what the PHY speed and duplex configuration is.
   7770 		 * In addition, we need to perform a hardware reset on the PHY
   7771 		 * to take it out of reset.
   7772 		 */
   7773 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7774 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7775 
   7776 		/* The PHY reset pin is active-low. */
   7777 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7778 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7779 		    CTRL_EXT_SWDPIN(4));
   7780 		reg |= CTRL_EXT_SWDPIO(4);
   7781 
   7782 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7783 		CSR_WRITE_FLUSH(sc);
   7784 		delay(10*1000);
   7785 
   7786 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7787 		CSR_WRITE_FLUSH(sc);
   7788 		delay(150);
   7789 #if 0
   7790 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7791 #endif
   7792 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7793 		break;
   7794 	case WM_T_82544:	/* reset 10000us */
   7795 	case WM_T_82540:
   7796 	case WM_T_82545:
   7797 	case WM_T_82545_3:
   7798 	case WM_T_82546:
   7799 	case WM_T_82546_3:
   7800 	case WM_T_82541:
   7801 	case WM_T_82541_2:
   7802 	case WM_T_82547:
   7803 	case WM_T_82547_2:
   7804 	case WM_T_82571:	/* reset 100us */
   7805 	case WM_T_82572:
   7806 	case WM_T_82573:
   7807 	case WM_T_82574:
   7808 	case WM_T_82575:
   7809 	case WM_T_82576:
   7810 	case WM_T_82580:
   7811 	case WM_T_I350:
   7812 	case WM_T_I354:
   7813 	case WM_T_I210:
   7814 	case WM_T_I211:
   7815 	case WM_T_82583:
   7816 	case WM_T_80003:
   7817 		/* generic reset */
   7818 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7819 		CSR_WRITE_FLUSH(sc);
   7820 		delay(20000);
   7821 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7822 		CSR_WRITE_FLUSH(sc);
   7823 		delay(20000);
   7824 
   7825 		if ((sc->sc_type == WM_T_82541)
   7826 		    || (sc->sc_type == WM_T_82541_2)
   7827 		    || (sc->sc_type == WM_T_82547)
   7828 		    || (sc->sc_type == WM_T_82547_2)) {
   7829 			/* workaround for igp are done in igp_reset() */
   7830 			/* XXX add code to set LED after phy reset */
   7831 		}
   7832 		break;
   7833 	case WM_T_ICH8:
   7834 	case WM_T_ICH9:
   7835 	case WM_T_ICH10:
   7836 	case WM_T_PCH:
   7837 	case WM_T_PCH2:
   7838 	case WM_T_PCH_LPT:
   7839 	case WM_T_PCH_SPT:
   7840 		/* generic reset */
   7841 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7842 		CSR_WRITE_FLUSH(sc);
   7843 		delay(100);
   7844 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7845 		CSR_WRITE_FLUSH(sc);
   7846 		delay(150);
   7847 		break;
   7848 	default:
   7849 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7850 		    __func__);
   7851 		break;
   7852 	}
   7853 
   7854 	/* release PHY semaphore */
   7855 	switch (sc->sc_type) {
   7856 	case WM_T_82571:
   7857 	case WM_T_82572:
   7858 	case WM_T_82573:
   7859 	case WM_T_82574:
   7860 	case WM_T_82583:
   7861 		 /* XXX should put sw semaphore, too */
   7862 		wm_put_swsm_semaphore(sc);
   7863 		break;
   7864 	case WM_T_82575:
   7865 	case WM_T_82576:
   7866 	case WM_T_82580:
   7867 	case WM_T_I350:
   7868 	case WM_T_I354:
   7869 	case WM_T_I210:
   7870 	case WM_T_I211:
   7871 	case WM_T_80003:
   7872 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7873 		break;
   7874 	case WM_T_ICH8:
   7875 	case WM_T_ICH9:
   7876 	case WM_T_ICH10:
   7877 	case WM_T_PCH:
   7878 	case WM_T_PCH2:
   7879 	case WM_T_PCH_LPT:
   7880 	case WM_T_PCH_SPT:
   7881 		wm_put_swfwhw_semaphore(sc);
   7882 		break;
   7883 	default:
   7884 		/* nothing to do */
   7885 		rv = 0;
   7886 		break;
   7887 	}
   7888 
   7889 	/* get_cfg_done */
   7890 	wm_get_cfg_done(sc);
   7891 
   7892 	/* extra setup */
   7893 	switch (sc->sc_type) {
   7894 	case WM_T_82542_2_0:
   7895 	case WM_T_82542_2_1:
   7896 	case WM_T_82543:
   7897 	case WM_T_82544:
   7898 	case WM_T_82540:
   7899 	case WM_T_82545:
   7900 	case WM_T_82545_3:
   7901 	case WM_T_82546:
   7902 	case WM_T_82546_3:
   7903 	case WM_T_82541_2:
   7904 	case WM_T_82547_2:
   7905 	case WM_T_82571:
   7906 	case WM_T_82572:
   7907 	case WM_T_82573:
   7908 	case WM_T_82575:
   7909 	case WM_T_82576:
   7910 	case WM_T_82580:
   7911 	case WM_T_I350:
   7912 	case WM_T_I354:
   7913 	case WM_T_I210:
   7914 	case WM_T_I211:
   7915 	case WM_T_80003:
   7916 		/* null */
   7917 		break;
   7918 	case WM_T_82574:
   7919 	case WM_T_82583:
   7920 		wm_lplu_d0_disable(sc);
   7921 		break;
   7922 	case WM_T_82541:
   7923 	case WM_T_82547:
   7924 		/* XXX Configure actively LED after PHY reset */
   7925 		break;
   7926 	case WM_T_ICH8:
   7927 	case WM_T_ICH9:
   7928 	case WM_T_ICH10:
   7929 	case WM_T_PCH:
   7930 	case WM_T_PCH2:
   7931 	case WM_T_PCH_LPT:
   7932 	case WM_T_PCH_SPT:
   7933 		/* Allow time for h/w to get to a quiescent state afer reset */
   7934 		delay(10*1000);
   7935 
   7936 		if (sc->sc_type == WM_T_PCH)
   7937 			wm_hv_phy_workaround_ich8lan(sc);
   7938 
   7939 		if (sc->sc_type == WM_T_PCH2)
   7940 			wm_lv_phy_workaround_ich8lan(sc);
   7941 
   7942 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7943 			/*
   7944 			 * dummy read to clear the phy wakeup bit after lcd
   7945 			 * reset
   7946 			 */
   7947 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7948 		}
   7949 
   7950 		/*
   7951 		 * XXX Configure the LCD with th extended configuration region
   7952 		 * in NVM
   7953 		 */
   7954 
   7955 		/* Disable D0 LPLU. */
   7956 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   7957 			wm_lplu_d0_disable_pch(sc);
   7958 		else
   7959 			wm_lplu_d0_disable(sc);	/* ICH* */
   7960 		break;
   7961 	default:
   7962 		panic("%s: unknown type\n", __func__);
   7963 		break;
   7964 	}
   7965 }
   7966 
   7967 /*
   7968  * wm_get_phy_id_82575:
   7969  *
   7970  * Return PHY ID. Return -1 if it failed.
   7971  */
   7972 static int
   7973 wm_get_phy_id_82575(struct wm_softc *sc)
   7974 {
   7975 	uint32_t reg;
   7976 	int phyid = -1;
   7977 
   7978 	/* XXX */
   7979 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   7980 		return -1;
   7981 
   7982 	if (wm_sgmii_uses_mdio(sc)) {
   7983 		switch (sc->sc_type) {
   7984 		case WM_T_82575:
   7985 		case WM_T_82576:
   7986 			reg = CSR_READ(sc, WMREG_MDIC);
   7987 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   7988 			break;
   7989 		case WM_T_82580:
   7990 		case WM_T_I350:
   7991 		case WM_T_I354:
   7992 		case WM_T_I210:
   7993 		case WM_T_I211:
   7994 			reg = CSR_READ(sc, WMREG_MDICNFG);
   7995 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   7996 			break;
   7997 		default:
   7998 			return -1;
   7999 		}
   8000 	}
   8001 
   8002 	return phyid;
   8003 }
   8004 
   8005 
   8006 /*
   8007  * wm_gmii_mediainit:
   8008  *
   8009  *	Initialize media for use on 1000BASE-T devices.
   8010  */
   8011 static void
   8012 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8013 {
   8014 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8015 	struct mii_data *mii = &sc->sc_mii;
   8016 	uint32_t reg;
   8017 
   8018 	/* We have GMII. */
   8019 	sc->sc_flags |= WM_F_HAS_MII;
   8020 
   8021 	if (sc->sc_type == WM_T_80003)
   8022 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8023 	else
   8024 		sc->sc_tipg = TIPG_1000T_DFLT;
   8025 
   8026 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8027 	if ((sc->sc_type == WM_T_82580)
   8028 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8029 	    || (sc->sc_type == WM_T_I211)) {
   8030 		reg = CSR_READ(sc, WMREG_PHPM);
   8031 		reg &= ~PHPM_GO_LINK_D;
   8032 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8033 	}
   8034 
   8035 	/*
   8036 	 * Let the chip set speed/duplex on its own based on
   8037 	 * signals from the PHY.
   8038 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8039 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8040 	 */
   8041 	sc->sc_ctrl |= CTRL_SLU;
   8042 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8043 
   8044 	/* Initialize our media structures and probe the GMII. */
   8045 	mii->mii_ifp = ifp;
   8046 
   8047 	/*
   8048 	 * Determine the PHY access method.
   8049 	 *
   8050 	 *  For SGMII, use SGMII specific method.
   8051 	 *
   8052 	 *  For some devices, we can determine the PHY access method
   8053 	 * from sc_type.
   8054 	 *
   8055 	 *  For ICH and PCH variants, it's difficult to determine the PHY
   8056 	 * access  method by sc_type, so use the PCI product ID for some
   8057 	 * devices.
   8058 	 * For other ICH8 variants, try to use igp's method. If the PHY
   8059 	 * can't detect, then use bm's method.
   8060 	 */
   8061 	switch (prodid) {
   8062 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8063 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8064 		/* 82577 */
   8065 		sc->sc_phytype = WMPHY_82577;
   8066 		break;
   8067 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8068 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8069 		/* 82578 */
   8070 		sc->sc_phytype = WMPHY_82578;
   8071 		break;
   8072 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8073 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8074 		/* 82579 */
   8075 		sc->sc_phytype = WMPHY_82579;
   8076 		break;
   8077 	case PCI_PRODUCT_INTEL_82801I_BM:
   8078 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8079 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8080 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8081 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8082 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8083 		/* 82567 */
   8084 		sc->sc_phytype = WMPHY_BM;
   8085 		mii->mii_readreg = wm_gmii_bm_readreg;
   8086 		mii->mii_writereg = wm_gmii_bm_writereg;
   8087 		break;
   8088 	default:
   8089 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8090 		    && !wm_sgmii_uses_mdio(sc)){
   8091 			/* SGMII */
   8092 			mii->mii_readreg = wm_sgmii_readreg;
   8093 			mii->mii_writereg = wm_sgmii_writereg;
   8094 		} else if (sc->sc_type >= WM_T_80003) {
   8095 			/* 80003 */
   8096 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8097 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8098 		} else if (sc->sc_type >= WM_T_I210) {
   8099 			/* I210 and I211 */
   8100 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8101 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8102 		} else if (sc->sc_type >= WM_T_82580) {
   8103 			/* 82580, I350 and I354 */
   8104 			sc->sc_phytype = WMPHY_82580;
   8105 			mii->mii_readreg = wm_gmii_82580_readreg;
   8106 			mii->mii_writereg = wm_gmii_82580_writereg;
   8107 		} else if (sc->sc_type >= WM_T_82544) {
   8108 			/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   8109 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8110 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8111 		} else {
   8112 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8113 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8114 		}
   8115 		break;
   8116 	}
   8117 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8118 		/* All PCH* use _hv_ */
   8119 		mii->mii_readreg = wm_gmii_hv_readreg;
   8120 		mii->mii_writereg = wm_gmii_hv_writereg;
   8121 	}
   8122 	mii->mii_statchg = wm_gmii_statchg;
   8123 
   8124 	wm_gmii_reset(sc);
   8125 
   8126 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8127 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8128 	    wm_gmii_mediastatus);
   8129 
   8130 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8131 	    || (sc->sc_type == WM_T_82580)
   8132 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8133 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8134 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8135 			/* Attach only one port */
   8136 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8137 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8138 		} else {
   8139 			int i, id;
   8140 			uint32_t ctrl_ext;
   8141 
   8142 			id = wm_get_phy_id_82575(sc);
   8143 			if (id != -1) {
   8144 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8145 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8146 			}
   8147 			if ((id == -1)
   8148 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8149 				/* Power on sgmii phy if it is disabled */
   8150 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8151 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8152 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8153 				CSR_WRITE_FLUSH(sc);
   8154 				delay(300*1000); /* XXX too long */
   8155 
   8156 				/* from 1 to 8 */
   8157 				for (i = 1; i < 8; i++)
   8158 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8159 					    0xffffffff, i, MII_OFFSET_ANY,
   8160 					    MIIF_DOPAUSE);
   8161 
   8162 				/* restore previous sfp cage power state */
   8163 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8164 			}
   8165 		}
   8166 	} else {
   8167 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8168 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8169 	}
   8170 
   8171 	/*
   8172 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8173 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8174 	 */
   8175 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8176 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8177 		wm_set_mdio_slow_mode_hv(sc);
   8178 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8179 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8180 	}
   8181 
   8182 	/*
   8183 	 * (For ICH8 variants)
   8184 	 * If PHY detection failed, use BM's r/w function and retry.
   8185 	 */
   8186 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8187 		/* if failed, retry with *_bm_* */
   8188 		mii->mii_readreg = wm_gmii_bm_readreg;
   8189 		mii->mii_writereg = wm_gmii_bm_writereg;
   8190 
   8191 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8192 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8193 	}
   8194 
   8195 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8196 		/* Any PHY wasn't find */
   8197 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8198 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8199 		sc->sc_phytype = WMPHY_NONE;
   8200 	} else {
   8201 		/*
   8202 		 * PHY Found!
   8203 		 * Check PHY type.
   8204 		 */
   8205 		uint32_t model;
   8206 		struct mii_softc *child;
   8207 
   8208 		child = LIST_FIRST(&mii->mii_phys);
   8209 		model = child->mii_mpd_model;
   8210 		if (model == MII_MODEL_yyINTEL_I82566)
   8211 			sc->sc_phytype = WMPHY_IGP_3;
   8212 
   8213 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8214 	}
   8215 }
   8216 
   8217 /*
   8218  * wm_gmii_mediachange:	[ifmedia interface function]
   8219  *
   8220  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8221  */
   8222 static int
   8223 wm_gmii_mediachange(struct ifnet *ifp)
   8224 {
   8225 	struct wm_softc *sc = ifp->if_softc;
   8226 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8227 	int rc;
   8228 
   8229 	if ((ifp->if_flags & IFF_UP) == 0)
   8230 		return 0;
   8231 
   8232 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8233 	sc->sc_ctrl |= CTRL_SLU;
   8234 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8235 	    || (sc->sc_type > WM_T_82543)) {
   8236 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8237 	} else {
   8238 		sc->sc_ctrl &= ~CTRL_ASDE;
   8239 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8240 		if (ife->ifm_media & IFM_FDX)
   8241 			sc->sc_ctrl |= CTRL_FD;
   8242 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8243 		case IFM_10_T:
   8244 			sc->sc_ctrl |= CTRL_SPEED_10;
   8245 			break;
   8246 		case IFM_100_TX:
   8247 			sc->sc_ctrl |= CTRL_SPEED_100;
   8248 			break;
   8249 		case IFM_1000_T:
   8250 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8251 			break;
   8252 		default:
   8253 			panic("wm_gmii_mediachange: bad media 0x%x",
   8254 			    ife->ifm_media);
   8255 		}
   8256 	}
   8257 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8258 	if (sc->sc_type <= WM_T_82543)
   8259 		wm_gmii_reset(sc);
   8260 
   8261 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8262 		return 0;
   8263 	return rc;
   8264 }
   8265 
   8266 /*
   8267  * wm_gmii_mediastatus:	[ifmedia interface function]
   8268  *
   8269  *	Get the current interface media status on a 1000BASE-T device.
   8270  */
   8271 static void
   8272 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8273 {
   8274 	struct wm_softc *sc = ifp->if_softc;
   8275 
   8276 	ether_mediastatus(ifp, ifmr);
   8277 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8278 	    | sc->sc_flowflags;
   8279 }
   8280 
#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

/*
 * wm_i82543_mii_sendbits:
 *
 *	Clock out the 'nbits' least significant bits of 'data' on the
 *	bit-banged (software) MDIO interface, most significant bit first.
 *	The data pin is driven as an output; each bit is presented and
 *	then latched with a raise/lower of the clock pin, with 10us
 *	settling delays around every edge.
 */
static void
wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Configure the MDI pins: data as output, clock pin driven by us. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		/* Present the data bit with the clock low... */
		CSR_WRITE(sc, WMREG_CTRL, v);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		/* ...raise the clock to latch it... */
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		/* ...and drop the clock again before the next bit. */
		CSR_WRITE(sc, WMREG_CTRL, v);
		CSR_WRITE_FLUSH(sc);
		delay(10);
	}
}
   8310 
/*
 * wm_i82543_mii_recvbits:
 *
 *	Clock in and return a 16-bit value from the bit-banged (software)
 *	MDIO interface, most significant bit first.  The data pin is left
 *	as an input (MDI_DIR not set); one framing clock pulse precedes
 *	and one follows the 16 data bits.
 */
static uint32_t
wm_i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Configure the MDI data pin as an input. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* Turnaround: one full clock pulse before sampling begins. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	CSR_WRITE_FLUSH(sc);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	CSR_WRITE_FLUSH(sc);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	CSR_WRITE_FLUSH(sc);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		/* Sample the data pin while the clock is high. */
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		CSR_WRITE_FLUSH(sc);
		delay(10);
	}

	/* Trailing idle clock pulse. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	CSR_WRITE_FLUSH(sc);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	CSR_WRITE_FLUSH(sc);
	delay(10);

	return data;
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK
   8355 
   8356 /*
   8357  * wm_gmii_i82543_readreg:	[mii interface function]
   8358  *
   8359  *	Read a PHY register on the GMII (i82543 version).
   8360  */
   8361 static int
   8362 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8363 {
   8364 	struct wm_softc *sc = device_private(self);
   8365 	int rv;
   8366 
   8367 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8368 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8369 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8370 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8371 
   8372 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8373 	    device_xname(sc->sc_dev), phy, reg, rv));
   8374 
   8375 	return rv;
   8376 }
   8377 
   8378 /*
   8379  * wm_gmii_i82543_writereg:	[mii interface function]
   8380  *
   8381  *	Write a PHY register on the GMII (i82543 version).
   8382  */
   8383 static void
   8384 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8385 {
   8386 	struct wm_softc *sc = device_private(self);
   8387 
   8388 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8389 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8390 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8391 	    (MII_COMMAND_START << 30), 32);
   8392 }
   8393 
   8394 /*
   8395  * wm_gmii_i82544_readreg:	[mii interface function]
   8396  *
   8397  *	Read a PHY register on the GMII.
   8398  */
   8399 static int
   8400 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8401 {
   8402 	struct wm_softc *sc = device_private(self);
   8403 	uint32_t mdic = 0;
   8404 	int i, rv;
   8405 
   8406 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8407 	    MDIC_REGADD(reg));
   8408 
   8409 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8410 		mdic = CSR_READ(sc, WMREG_MDIC);
   8411 		if (mdic & MDIC_READY)
   8412 			break;
   8413 		delay(50);
   8414 	}
   8415 
   8416 	if ((mdic & MDIC_READY) == 0) {
   8417 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8418 		    device_xname(sc->sc_dev), phy, reg);
   8419 		rv = 0;
   8420 	} else if (mdic & MDIC_E) {
   8421 #if 0 /* This is normal if no PHY is present. */
   8422 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8423 		    device_xname(sc->sc_dev), phy, reg);
   8424 #endif
   8425 		rv = 0;
   8426 	} else {
   8427 		rv = MDIC_DATA(mdic);
   8428 		if (rv == 0xffff)
   8429 			rv = 0;
   8430 	}
   8431 
   8432 	return rv;
   8433 }
   8434 
   8435 /*
   8436  * wm_gmii_i82544_writereg:	[mii interface function]
   8437  *
   8438  *	Write a PHY register on the GMII.
   8439  */
   8440 static void
   8441 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8442 {
   8443 	struct wm_softc *sc = device_private(self);
   8444 	uint32_t mdic = 0;
   8445 	int i;
   8446 
   8447 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8448 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8449 
   8450 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8451 		mdic = CSR_READ(sc, WMREG_MDIC);
   8452 		if (mdic & MDIC_READY)
   8453 			break;
   8454 		delay(50);
   8455 	}
   8456 
   8457 	if ((mdic & MDIC_READY) == 0)
   8458 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8459 		    device_xname(sc->sc_dev), phy, reg);
   8460 	else if (mdic & MDIC_E)
   8461 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8462 		    device_xname(sc->sc_dev), phy, reg);
   8463 }
   8464 
   8465 /*
   8466  * wm_gmii_i80003_readreg:	[mii interface function]
   8467  *
   8468  *	Read a PHY register on the kumeran
   8469  * This could be handled by the PHY layer if we didn't have to lock the
   8470  * ressource ...
   8471  */
   8472 static int
   8473 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8474 {
   8475 	struct wm_softc *sc = device_private(self);
   8476 	int sem;
   8477 	int rv;
   8478 
   8479 	if (phy != 1) /* only one PHY on kumeran bus */
   8480 		return 0;
   8481 
   8482 	sem = swfwphysem[sc->sc_funcid];
   8483 	if (wm_get_swfw_semaphore(sc, sem)) {
   8484 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8485 		    __func__);
   8486 		return 0;
   8487 	}
   8488 
   8489 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8490 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8491 		    reg >> GG82563_PAGE_SHIFT);
   8492 	} else {
   8493 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8494 		    reg >> GG82563_PAGE_SHIFT);
   8495 	}
   8496 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
   8497 	delay(200);
   8498 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8499 	delay(200);
   8500 
   8501 	wm_put_swfw_semaphore(sc, sem);
   8502 	return rv;
   8503 }
   8504 
   8505 /*
   8506  * wm_gmii_i80003_writereg:	[mii interface function]
   8507  *
   8508  *	Write a PHY register on the kumeran.
   8509  * This could be handled by the PHY layer if we didn't have to lock the
   8510  * ressource ...
   8511  */
   8512 static void
   8513 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8514 {
   8515 	struct wm_softc *sc = device_private(self);
   8516 	int sem;
   8517 
   8518 	if (phy != 1) /* only one PHY on kumeran bus */
   8519 		return;
   8520 
   8521 	sem = swfwphysem[sc->sc_funcid];
   8522 	if (wm_get_swfw_semaphore(sc, sem)) {
   8523 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8524 		    __func__);
   8525 		return;
   8526 	}
   8527 
   8528 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8529 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8530 		    reg >> GG82563_PAGE_SHIFT);
   8531 	} else {
   8532 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8533 		    reg >> GG82563_PAGE_SHIFT);
   8534 	}
   8535 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
   8536 	delay(200);
   8537 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8538 	delay(200);
   8539 
   8540 	wm_put_swfw_semaphore(sc, sem);
   8541 }
   8542 
   8543 /*
   8544  * wm_gmii_bm_readreg:	[mii interface function]
   8545  *
   8546  *	Read a PHY register on the kumeran
   8547  * This could be handled by the PHY layer if we didn't have to lock the
   8548  * ressource ...
   8549  */
   8550 static int
   8551 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8552 {
   8553 	struct wm_softc *sc = device_private(self);
   8554 	int sem;
   8555 	int rv;
   8556 
   8557 	sem = swfwphysem[sc->sc_funcid];
   8558 	if (wm_get_swfw_semaphore(sc, sem)) {
   8559 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8560 		    __func__);
   8561 		return 0;
   8562 	}
   8563 
   8564 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8565 		if (phy == 1)
   8566 			wm_gmii_i82544_writereg(self, phy,
   8567 			    MII_IGPHY_PAGE_SELECT, reg);
   8568 		else
   8569 			wm_gmii_i82544_writereg(self, phy,
   8570 			    GG82563_PHY_PAGE_SELECT,
   8571 			    reg >> GG82563_PAGE_SHIFT);
   8572 	}
   8573 
   8574 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8575 	wm_put_swfw_semaphore(sc, sem);
   8576 	return rv;
   8577 }
   8578 
   8579 /*
   8580  * wm_gmii_bm_writereg:	[mii interface function]
   8581  *
   8582  *	Write a PHY register on the kumeran.
   8583  * This could be handled by the PHY layer if we didn't have to lock the
   8584  * ressource ...
   8585  */
   8586 static void
   8587 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8588 {
   8589 	struct wm_softc *sc = device_private(self);
   8590 	int sem;
   8591 
   8592 	sem = swfwphysem[sc->sc_funcid];
   8593 	if (wm_get_swfw_semaphore(sc, sem)) {
   8594 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8595 		    __func__);
   8596 		return;
   8597 	}
   8598 
   8599 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8600 		if (phy == 1)
   8601 			wm_gmii_i82544_writereg(self, phy,
   8602 			    MII_IGPHY_PAGE_SELECT, reg);
   8603 		else
   8604 			wm_gmii_i82544_writereg(self, phy,
   8605 			    GG82563_PHY_PAGE_SELECT,
   8606 			    reg >> GG82563_PAGE_SHIFT);
   8607 	}
   8608 
   8609 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8610 	wm_put_swfw_semaphore(sc, sem);
   8611 }
   8612 
/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Access a register on the BM PHY's wake-up page (page 800), which
 *	needs a special sequence: enable host wake-up register access on
 *	page 769, switch to page 800, write the register address, then
 *	read or write the data, and finally restore the page-769 enable
 *	register.  If 'rd' is nonzero the register is read into *val,
 *	otherwise *val is written to it.  All page-select and data
 *	accesses go to PHY address 1.
 */
static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
{
	struct wm_softc *sc = device_private(self);
	uint16_t regnum = BM_PHY_REG_NUM(offset);
	uint16_t wuce;

	/* XXX Gig must be disabled for MDIO accesses to page 800 */
	if (sc->sc_type == WM_T_PCH) {
		/* XXX e1000 driver do nothing... why? */
	}

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Save the WUC-enable register so it can be restored below. */
	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);

	/* Enable host wake-up register access (host WU bit cleared). */
	wuce &= ~BM_WUC_HOST_WU_BIT;
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
	    wuce | BM_WUC_ENABLE_BIT);

	/* Select page 800 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);

	/* Write page 800: first the target register number... */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);

	/* ...then transfer the data in the requested direction. */
	if (rd)
		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
	else
		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);

	/* Set page 769 again and restore the saved enable register. */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
}
   8653 
   8654 /*
   8655  * wm_gmii_hv_readreg:	[mii interface function]
   8656  *
   8657  *	Read a PHY register on the kumeran
   8658  * This could be handled by the PHY layer if we didn't have to lock the
   8659  * ressource ...
   8660  */
   8661 static int
   8662 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8663 {
   8664 	struct wm_softc *sc = device_private(self);
   8665 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8666 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8667 	uint16_t val;
   8668 	int rv;
   8669 
   8670 	if (wm_get_swfwhw_semaphore(sc)) {
   8671 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8672 		    __func__);
   8673 		return 0;
   8674 	}
   8675 
   8676 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8677 	if (sc->sc_phytype == WMPHY_82577) {
   8678 		/* XXX must write */
   8679 	}
   8680 
   8681 	/* Page 800 works differently than the rest so it has its own func */
   8682 	if (page == BM_WUC_PAGE) {
   8683 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8684 		return val;
   8685 	}
   8686 
   8687 	/*
   8688 	 * Lower than page 768 works differently than the rest so it has its
   8689 	 * own func
   8690 	 */
   8691 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8692 		printf("gmii_hv_readreg!!!\n");
   8693 		return 0;
   8694 	}
   8695 
   8696 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8697 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8698 		    page << BME1000_PAGE_SHIFT);
   8699 	}
   8700 
   8701 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8702 	wm_put_swfwhw_semaphore(sc);
   8703 	return rv;
   8704 }
   8705 
   8706 /*
   8707  * wm_gmii_hv_writereg:	[mii interface function]
   8708  *
   8709  *	Write a PHY register on the kumeran.
   8710  * This could be handled by the PHY layer if we didn't have to lock the
   8711  * ressource ...
   8712  */
   8713 static void
   8714 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8715 {
   8716 	struct wm_softc *sc = device_private(self);
   8717 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8718 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8719 
   8720 	if (wm_get_swfwhw_semaphore(sc)) {
   8721 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8722 		    __func__);
   8723 		return;
   8724 	}
   8725 
   8726 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8727 
   8728 	/* Page 800 works differently than the rest so it has its own func */
   8729 	if (page == BM_WUC_PAGE) {
   8730 		uint16_t tmp;
   8731 
   8732 		tmp = val;
   8733 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8734 		return;
   8735 	}
   8736 
   8737 	/*
   8738 	 * Lower than page 768 works differently than the rest so it has its
   8739 	 * own func
   8740 	 */
   8741 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8742 		printf("gmii_hv_writereg!!!\n");
   8743 		return;
   8744 	}
   8745 
   8746 	/*
   8747 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8748 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8749 	 */
   8750 
   8751 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8752 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8753 		    page << BME1000_PAGE_SHIFT);
   8754 	}
   8755 
   8756 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8757 	wm_put_swfwhw_semaphore(sc);
   8758 }
   8759 
   8760 /*
   8761  * wm_gmii_82580_readreg:	[mii interface function]
   8762  *
   8763  *	Read a PHY register on the 82580 and I350.
   8764  * This could be handled by the PHY layer if we didn't have to lock the
   8765  * ressource ...
   8766  */
   8767 static int
   8768 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8769 {
   8770 	struct wm_softc *sc = device_private(self);
   8771 	int sem;
   8772 	int rv;
   8773 
   8774 	sem = swfwphysem[sc->sc_funcid];
   8775 	if (wm_get_swfw_semaphore(sc, sem)) {
   8776 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8777 		    __func__);
   8778 		return 0;
   8779 	}
   8780 
   8781 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8782 
   8783 	wm_put_swfw_semaphore(sc, sem);
   8784 	return rv;
   8785 }
   8786 
   8787 /*
   8788  * wm_gmii_82580_writereg:	[mii interface function]
   8789  *
   8790  *	Write a PHY register on the 82580 and I350.
   8791  * This could be handled by the PHY layer if we didn't have to lock the
   8792  * ressource ...
   8793  */
   8794 static void
   8795 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8796 {
   8797 	struct wm_softc *sc = device_private(self);
   8798 	int sem;
   8799 
   8800 	sem = swfwphysem[sc->sc_funcid];
   8801 	if (wm_get_swfw_semaphore(sc, sem)) {
   8802 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8803 		    __func__);
   8804 		return;
   8805 	}
   8806 
   8807 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8808 
   8809 	wm_put_swfw_semaphore(sc, sem);
   8810 }
   8811 
   8812 /*
   8813  * wm_gmii_gs40g_readreg:	[mii interface function]
   8814  *
   8815  *	Read a PHY register on the I2100 and I211.
   8816  * This could be handled by the PHY layer if we didn't have to lock the
   8817  * ressource ...
   8818  */
   8819 static int
   8820 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8821 {
   8822 	struct wm_softc *sc = device_private(self);
   8823 	int sem;
   8824 	int page, offset;
   8825 	int rv;
   8826 
   8827 	/* Acquire semaphore */
   8828 	sem = swfwphysem[sc->sc_funcid];
   8829 	if (wm_get_swfw_semaphore(sc, sem)) {
   8830 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8831 		    __func__);
   8832 		return 0;
   8833 	}
   8834 
   8835 	/* Page select */
   8836 	page = reg >> GS40G_PAGE_SHIFT;
   8837 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8838 
   8839 	/* Read reg */
   8840 	offset = reg & GS40G_OFFSET_MASK;
   8841 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8842 
   8843 	wm_put_swfw_semaphore(sc, sem);
   8844 	return rv;
   8845 }
   8846 
   8847 /*
   8848  * wm_gmii_gs40g_writereg:	[mii interface function]
   8849  *
   8850  *	Write a PHY register on the I210 and I211.
   8851  * This could be handled by the PHY layer if we didn't have to lock the
   8852  * ressource ...
   8853  */
   8854 static void
   8855 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8856 {
   8857 	struct wm_softc *sc = device_private(self);
   8858 	int sem;
   8859 	int page, offset;
   8860 
   8861 	/* Acquire semaphore */
   8862 	sem = swfwphysem[sc->sc_funcid];
   8863 	if (wm_get_swfw_semaphore(sc, sem)) {
   8864 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8865 		    __func__);
   8866 		return;
   8867 	}
   8868 
   8869 	/* Page select */
   8870 	page = reg >> GS40G_PAGE_SHIFT;
   8871 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8872 
   8873 	/* Write reg */
   8874 	offset = reg & GS40G_OFFSET_MASK;
   8875 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8876 
   8877 	/* Release semaphore */
   8878 	wm_put_swfw_semaphore(sc, sem);
   8879 }
   8880 
   8881 /*
   8882  * wm_gmii_statchg:	[mii interface function]
   8883  *
   8884  *	Callback from MII layer when media changes.
   8885  */
   8886 static void
   8887 wm_gmii_statchg(struct ifnet *ifp)
   8888 {
   8889 	struct wm_softc *sc = ifp->if_softc;
   8890 	struct mii_data *mii = &sc->sc_mii;
   8891 
   8892 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8893 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8894 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8895 
   8896 	/*
   8897 	 * Get flow control negotiation result.
   8898 	 */
   8899 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8900 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8901 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8902 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8903 	}
   8904 
   8905 	if (sc->sc_flowflags & IFM_FLOW) {
   8906 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8907 			sc->sc_ctrl |= CTRL_TFCE;
   8908 			sc->sc_fcrtl |= FCRTL_XONE;
   8909 		}
   8910 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8911 			sc->sc_ctrl |= CTRL_RFCE;
   8912 	}
   8913 
   8914 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8915 		DPRINTF(WM_DEBUG_LINK,
   8916 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8917 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8918 	} else {
   8919 		DPRINTF(WM_DEBUG_LINK,
   8920 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8921 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8922 	}
   8923 
   8924 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8925 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8926 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8927 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8928 	if (sc->sc_type == WM_T_80003) {
   8929 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8930 		case IFM_1000_T:
   8931 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8932 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8933 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8934 			break;
   8935 		default:
   8936 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8937 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8938 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8939 			break;
   8940 		}
   8941 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8942 	}
   8943 }
   8944 
   8945 /*
   8946  * wm_kmrn_readreg:
   8947  *
   8948  *	Read a kumeran register
   8949  */
   8950 static int
   8951 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8952 {
   8953 	int rv;
   8954 
   8955 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8956 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8957 			aprint_error_dev(sc->sc_dev,
   8958 			    "%s: failed to get semaphore\n", __func__);
   8959 			return 0;
   8960 		}
   8961 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8962 		if (wm_get_swfwhw_semaphore(sc)) {
   8963 			aprint_error_dev(sc->sc_dev,
   8964 			    "%s: failed to get semaphore\n", __func__);
   8965 			return 0;
   8966 		}
   8967 	}
   8968 
   8969 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8970 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8971 	    KUMCTRLSTA_REN);
   8972 	CSR_WRITE_FLUSH(sc);
   8973 	delay(2);
   8974 
   8975 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   8976 
   8977 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8978 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8979 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8980 		wm_put_swfwhw_semaphore(sc);
   8981 
   8982 	return rv;
   8983 }
   8984 
   8985 /*
   8986  * wm_kmrn_writereg:
   8987  *
   8988  *	Write a kumeran register
   8989  */
   8990 static void
   8991 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   8992 {
   8993 
   8994 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8995 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8996 			aprint_error_dev(sc->sc_dev,
   8997 			    "%s: failed to get semaphore\n", __func__);
   8998 			return;
   8999 		}
   9000 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9001 		if (wm_get_swfwhw_semaphore(sc)) {
   9002 			aprint_error_dev(sc->sc_dev,
   9003 			    "%s: failed to get semaphore\n", __func__);
   9004 			return;
   9005 		}
   9006 	}
   9007 
   9008 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9009 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9010 	    (val & KUMCTRLSTA_MASK));
   9011 
   9012 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9013 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9014 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9015 		wm_put_swfwhw_semaphore(sc);
   9016 }
   9017 
   9018 /* SGMII related */
   9019 
   9020 /*
   9021  * wm_sgmii_uses_mdio
   9022  *
   9023  * Check whether the transaction is to the internal PHY or the external
   9024  * MDIO interface. Return true if it's MDIO.
   9025  */
   9026 static bool
   9027 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9028 {
   9029 	uint32_t reg;
   9030 	bool ismdio = false;
   9031 
   9032 	switch (sc->sc_type) {
   9033 	case WM_T_82575:
   9034 	case WM_T_82576:
   9035 		reg = CSR_READ(sc, WMREG_MDIC);
   9036 		ismdio = ((reg & MDIC_DEST) != 0);
   9037 		break;
   9038 	case WM_T_82580:
   9039 	case WM_T_I350:
   9040 	case WM_T_I354:
   9041 	case WM_T_I210:
   9042 	case WM_T_I211:
   9043 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9044 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9045 		break;
   9046 	default:
   9047 		break;
   9048 	}
   9049 
   9050 	return ismdio;
   9051 }
   9052 
   9053 /*
   9054  * wm_sgmii_readreg:	[mii interface function]
   9055  *
   9056  *	Read a PHY register on the SGMII
   9057  * This could be handled by the PHY layer if we didn't have to lock the
   9058  * ressource ...
   9059  */
   9060 static int
   9061 wm_sgmii_readreg(device_t self, int phy, int reg)
   9062 {
   9063 	struct wm_softc *sc = device_private(self);
   9064 	uint32_t i2ccmd;
   9065 	int i, rv;
   9066 
   9067 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9068 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9069 		    __func__);
   9070 		return 0;
   9071 	}
   9072 
   9073 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9074 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9075 	    | I2CCMD_OPCODE_READ;
   9076 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9077 
   9078 	/* Poll the ready bit */
   9079 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9080 		delay(50);
   9081 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9082 		if (i2ccmd & I2CCMD_READY)
   9083 			break;
   9084 	}
   9085 	if ((i2ccmd & I2CCMD_READY) == 0)
   9086 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9087 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9088 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9089 
   9090 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9091 
   9092 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9093 	return rv;
   9094 }
   9095 
   9096 /*
   9097  * wm_sgmii_writereg:	[mii interface function]
   9098  *
   9099  *	Write a PHY register on the SGMII.
   9100  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9102  */
   9103 static void
   9104 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9105 {
   9106 	struct wm_softc *sc = device_private(self);
   9107 	uint32_t i2ccmd;
   9108 	int i;
   9109 	int val_swapped;
   9110 
   9111 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9112 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9113 		    __func__);
   9114 		return;
   9115 	}
   9116 	/* Swap the data bytes for the I2C interface */
   9117 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9118 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9119 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9120 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9121 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9122 
   9123 	/* Poll the ready bit */
   9124 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9125 		delay(50);
   9126 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9127 		if (i2ccmd & I2CCMD_READY)
   9128 			break;
   9129 	}
   9130 	if ((i2ccmd & I2CCMD_READY) == 0)
   9131 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9132 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9133 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9134 
   9135 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
   9136 }
   9137 
   9138 /* TBI related */
   9139 
   9140 /*
   9141  * wm_tbi_mediainit:
   9142  *
   9143  *	Initialize media for use on 1000BASE-X devices.
   9144  */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const char *sep = "";

	/* Pre-82543 chips use a different default transmit IPG value. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	/*
	 * Number of link-check ticks a down link may persist before
	 * autonegotiation is restarted (see wm_tbi_tick()/wm_serdes_tick()).
	 */
	sc->sc_tbi_serdes_anegticks = 5;

	/* Initialize our media structures */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;

	/*
	 * 82575 and newer SERDES attachments get dedicated media
	 * change/status callbacks; everything else uses the TBI handlers.
	 */
	if ((sc->sc_type >= WM_T_82575)
	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
		    wm_serdes_mediachange, wm_serdes_mediastatus);
	else
		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
		    wm_tbi_mediachange, wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);

	/* XXX Perhaps this is only for TBI */
	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
		sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
		sc->sc_ctrl &= ~CTRL_LRST;

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Announce and register one media type; "sep" comma-separates. */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal_dev(sc->sc_dev, "");

	/* Only 82545 is LX */
	if (sc->sc_type == WM_T_82545) {
		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
	} else {
		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
	}
	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	/* Default to autonegotiation. */
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
}
   9211 
   9212 /*
   9213  * wm_tbi_mediachange:	[ifmedia interface function]
   9214  *
   9215  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9216  */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
		/* XXX need some work for >= 82571 and < 82575 */
		if (sc->sc_type < WM_T_82575)
			return 0;
	}

	/* Make sure serdes loopback is disabled on chips that support it. */
	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
	    || (sc->sc_type >= WM_T_82575))
		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);

	/*
	 * Build the TXCW (transmit config word) we advertise: always
	 * enable autonegotiation, then set the duplex abilities from
	 * the selected media.
	 */
	sc->sc_ctrl &= ~CTRL_LRST;
	sc->sc_txcw = TXCW_ANE;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
		sc->sc_txcw |= TXCW_FD | TXCW_HD;
	else if (ife->ifm_media & IFM_FDX)
		sc->sc_txcw |= TXCW_FD;
	else
		sc->sc_txcw |= TXCW_HD;

	/* Advertise pause capabilities if flow control is requested. */
	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;

	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
		    device_xname(sc->sc_dev), sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);
	delay(1000);

	/* Sample the Loss Of Signal pin (SWDPIN 1). */
	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			    device_xname(sc->sc_dev),i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev),status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));

			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			/* Program collision distance and XON from duplex. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 chips use the old FCRTL register. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Timed out: poke the link-check logic once. */
			if (i == WM_LINKUP_TIMEOUT)
				wm_check_for_link(sc);
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_serdes_set_linkled(sc);

	return 0;
}
   9322 
   9323 /*
   9324  * wm_tbi_mediastatus:	[ifmedia interface function]
   9325  *
   9326  *	Get the current interface media status on a 1000BASE-X device.
   9327  */
   9328 static void
   9329 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9330 {
   9331 	struct wm_softc *sc = ifp->if_softc;
   9332 	uint32_t ctrl, status;
   9333 
   9334 	ifmr->ifm_status = IFM_AVALID;
   9335 	ifmr->ifm_active = IFM_ETHER;
   9336 
   9337 	status = CSR_READ(sc, WMREG_STATUS);
   9338 	if ((status & STATUS_LU) == 0) {
   9339 		ifmr->ifm_active |= IFM_NONE;
   9340 		return;
   9341 	}
   9342 
   9343 	ifmr->ifm_status |= IFM_ACTIVE;
   9344 	/* Only 82545 is LX */
   9345 	if (sc->sc_type == WM_T_82545)
   9346 		ifmr->ifm_active |= IFM_1000_LX;
   9347 	else
   9348 		ifmr->ifm_active |= IFM_1000_SX;
   9349 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9350 		ifmr->ifm_active |= IFM_FDX;
   9351 	else
   9352 		ifmr->ifm_active |= IFM_HDX;
   9353 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9354 	if (ctrl & CTRL_RFCE)
   9355 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9356 	if (ctrl & CTRL_TFCE)
   9357 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9358 }
   9359 
   9360 /* XXX TBI only */
/*
 * wm_check_for_link:
 *
 *	Decide, from the signal pin, link-up bit and received /C/ ordered
 *	sets, whether to force the link up or fall back to autonegotiation
 *	(see the truth table below).  TBI only.
 */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
		/* XXX need some work for >= 82571 */
		if (sc->sc_type >= WM_T_82571) {
			sc->sc_tbi_linkup = 1;
			return 0;
		}
	}

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	/* On > 82544 a set SWDPIN(1) means "signal present". */
	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK,
	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));

	/*
	 * SWDPIN   LU RXCW
	 *      0    0    0
	 *      0    0    1	(should not happen)
	 *      0    1    0	(should not happen)
	 *      0    1    1	(should not happen)
	 *      1    0    0	Disable autonego and force linkup
	 *      1    0    1	got /C/ but not linkup yet
	 *      1    1    0	(linkup)
	 *      1    1    1	If IFM_AUTO, back to autonego
	 *
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		/* Signal but no link and no /C/: partner isn't negotiating. */
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
			__func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: CTRL was updated TFCE and RFCE automatically,
		 * so we should update sc->sc_ctrl
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		/* Link and /C/ seen: re-enable autonegotiation. */
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
			__func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		/* Receiving /C/ but link not up yet: just note it. */
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
			status));
	}

	return 0;
}
   9437 
   9438 /*
   9439  * wm_tbi_tick:
   9440  *
   9441  *	Check the link on TBI devices.
   9442  *	This function acts as mii_tick().
   9443  */
static void
wm_tbi_tick(struct wm_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
	uint32_t status;

	/* Caller must hold the core lock. */
	KASSERT(WM_CORE_LOCKED(sc));

	status = CSR_READ(sc, WMREG_STATUS);

	/* XXX is this needed? */
	(void)CSR_READ(sc, WMREG_RXCW);
	(void)CSR_READ(sc, WMREG_CTRL);

	/* set link status */
	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n",
			device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	} else if (sc->sc_tbi_linkup == 0) {
		/* Down -> up transition: reset the retry counter too. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n",
			device_xname(sc->sc_dev),
			(status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tbi_linkup = 1;
		sc->sc_tbi_serdes_ticks = 0;
	}

	/* Nothing more to do unless the interface is up. */
	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
		goto setled;

	/* Same STATUS_LU test as above, now for the retry logic. */
	if ((status & STATUS_LU) == 0) {
		sc->sc_tbi_linkup = 0;
		/* If the timer expired, retry autonegotiation */
		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
		    && (++sc->sc_tbi_serdes_ticks
			>= sc->sc_tbi_serdes_anegticks)) {
			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
			sc->sc_tbi_serdes_ticks = 0;
			/*
			 * Reset the link, and let autonegotiation do
			 * its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			CSR_WRITE_FLUSH(sc);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			CSR_WRITE_FLUSH(sc);
			delay(1000);
			/* Toggle ANE off then back on to restart autonego. */
			CSR_WRITE(sc, WMREG_TXCW,
			    sc->sc_txcw & ~TXCW_ANE);
			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		}
	}

setled:
	wm_tbi_serdes_set_linkled(sc);
}
   9506 
   9507 /* SERDES related */
   9508 static void
   9509 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9510 {
   9511 	uint32_t reg;
   9512 
   9513 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9514 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9515 		return;
   9516 
   9517 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9518 	reg |= PCS_CFG_PCS_EN;
   9519 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9520 
   9521 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9522 	reg &= ~CTRL_EXT_SWDPIN(3);
   9523 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9524 	CSR_WRITE_FLUSH(sc);
   9525 }
   9526 
/*
 * wm_serdes_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on an 82575+ SERDES device.
 */
static int
wm_serdes_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	bool pcs_autoneg = true; /* XXX */
	uint32_t ctrl_ext, pcs_lctl, reg;

	/* XXX Currently, this function is not called on 8257[12] */
	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
	    || (sc->sc_type >= WM_T_82575))
		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);

	/* Power up PCS/SDP3 before touching the link. */
	wm_serdes_power_up_link_82575(sc);

	sc->sc_ctrl |= CTRL_SLU;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);

	/* Choose PCS autonego vs. forced mode from the link mode bits. */
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
	case CTRL_EXT_LINK_MODE_SGMII:
		pcs_autoneg = true;
		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
		break;
	case CTRL_EXT_LINK_MODE_1000KX:
		pcs_autoneg = false;
		/* FALLTHROUGH */
	default:
		if ((sc->sc_type == WM_T_82575)
		    || (sc->sc_type == WM_T_82576)) {
			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
				pcs_autoneg = false;
		}
		/* Force 1000 Mb/s full-duplex. */
		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
		    | CTRL_FRCFDX;
		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
	}
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	if (pcs_autoneg) {
		/* Enable and restart PCS autonegotiation. */
		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
		pcs_lctl &= ~PCS_LCTL_FORCE_FC;

		/* Advertise both pause capabilities. */
		reg = CSR_READ(sc, WMREG_PCS_ANADV);
		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
	} else
		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;

	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);


	return 0;
}
   9584 
/*
 * wm_serdes_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on an 82575+ SERDES device,
 *	including the pause resolution from the autonegotiation results.
 */
static void
wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t pcs_adv, pcs_lpab, reg;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Check PCS */
	reg = CSR_READ(sc, WMREG_PCS_LSTS);
	if ((reg & PCS_LSTS_LINKOK) == 0) {
		ifmr->ifm_active |= IFM_NONE;
		sc->sc_tbi_linkup = 0;
		goto setled;
	}

	sc->sc_tbi_linkup = 1;
	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
	if ((reg & PCS_LSTS_FDX) != 0)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
	mii->mii_media_active &= ~IFM_ETH_FMASK;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		/* Check flow */
		reg = CSR_READ(sc, WMREG_PCS_LSTS);
		if ((reg & PCS_LSTS_AN_COMP) == 0) {
			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
			goto setled;
		}
		/*
		 * Resolve the pause configuration from our advertised
		 * abilities and the link partner's abilities.
		 */
		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
		DPRINTF(WM_DEBUG_LINK,
		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
		if ((pcs_adv & TXCW_SYM_PAUSE)
		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
			/* Both sides symmetric: pause in both directions. */
			mii->mii_media_active |= IFM_FLOW
			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
		    && (pcs_adv & TXCW_ASYM_PAUSE)
		    && (pcs_lpab & TXCW_SYM_PAUSE)
		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
			/* We're asymmetric-only, partner symmetric: TX only. */
			mii->mii_media_active |= IFM_FLOW
			    | IFM_ETH_TXPAUSE;
		} else if ((pcs_adv & TXCW_SYM_PAUSE)
		    && (pcs_adv & TXCW_ASYM_PAUSE)
		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
			/* Partner asymmetric-only: RX only. */
			mii->mii_media_active |= IFM_FLOW
			    | IFM_ETH_RXPAUSE;
		} else {
			/* No compatible pause configuration: no flow control. */
		}
	}
	/* Merge the resolved flow-control bits into the reported media. */
	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
	    | (mii->mii_media_active & IFM_ETH_FMASK);
setled:
	wm_tbi_serdes_set_linkled(sc);
}
   9647 
   9648 /*
   9649  * wm_serdes_tick:
   9650  *
   9651  *	Check the link on serdes devices.
   9652  */
   9653 static void
   9654 wm_serdes_tick(struct wm_softc *sc)
   9655 {
   9656 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9657 	struct mii_data *mii = &sc->sc_mii;
   9658 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9659 	uint32_t reg;
   9660 
   9661 	KASSERT(WM_CORE_LOCKED(sc));
   9662 
   9663 	mii->mii_media_status = IFM_AVALID;
   9664 	mii->mii_media_active = IFM_ETHER;
   9665 
   9666 	/* Check PCS */
   9667 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9668 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9669 		mii->mii_media_status |= IFM_ACTIVE;
   9670 		sc->sc_tbi_linkup = 1;
   9671 		sc->sc_tbi_serdes_ticks = 0;
   9672 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9673 		if ((reg & PCS_LSTS_FDX) != 0)
   9674 			mii->mii_media_active |= IFM_FDX;
   9675 		else
   9676 			mii->mii_media_active |= IFM_HDX;
   9677 	} else {
   9678 		mii->mii_media_status |= IFM_NONE;
   9679 		sc->sc_tbi_linkup = 0;
   9680 		    /* If the timer expired, retry autonegotiation */
   9681 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9682 		    && (++sc->sc_tbi_serdes_ticks
   9683 			>= sc->sc_tbi_serdes_anegticks)) {
   9684 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9685 			sc->sc_tbi_serdes_ticks = 0;
   9686 			/* XXX */
   9687 			wm_serdes_mediachange(ifp);
   9688 		}
   9689 	}
   9690 
   9691 	wm_tbi_serdes_set_linkled(sc);
   9692 }
   9693 
   9694 /* SFP related */
   9695 
   9696 static int
   9697 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9698 {
   9699 	uint32_t i2ccmd;
   9700 	int i;
   9701 
   9702 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9703 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9704 
   9705 	/* Poll the ready bit */
   9706 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9707 		delay(50);
   9708 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9709 		if (i2ccmd & I2CCMD_READY)
   9710 			break;
   9711 	}
   9712 	if ((i2ccmd & I2CCMD_READY) == 0)
   9713 		return -1;
   9714 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9715 		return -1;
   9716 
   9717 	*data = i2ccmd & 0x00ff;
   9718 
   9719 	return 0;
   9720 }
   9721 
   9722 static uint32_t
   9723 wm_sfp_get_media_type(struct wm_softc *sc)
   9724 {
   9725 	uint32_t ctrl_ext;
   9726 	uint8_t val = 0;
   9727 	int timeout = 3;
   9728 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9729 	int rv = -1;
   9730 
   9731 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9732 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9733 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9734 	CSR_WRITE_FLUSH(sc);
   9735 
   9736 	/* Read SFP module data */
   9737 	while (timeout) {
   9738 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9739 		if (rv == 0)
   9740 			break;
   9741 		delay(100*1000); /* XXX too big */
   9742 		timeout--;
   9743 	}
   9744 	if (rv != 0)
   9745 		goto out;
   9746 	switch (val) {
   9747 	case SFF_SFP_ID_SFF:
   9748 		aprint_normal_dev(sc->sc_dev,
   9749 		    "Module/Connector soldered to board\n");
   9750 		break;
   9751 	case SFF_SFP_ID_SFP:
   9752 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9753 		break;
   9754 	case SFF_SFP_ID_UNKNOWN:
   9755 		goto out;
   9756 	default:
   9757 		break;
   9758 	}
   9759 
   9760 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9761 	if (rv != 0) {
   9762 		goto out;
   9763 	}
   9764 
   9765 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9766 		mediatype = WM_MEDIATYPE_SERDES;
   9767 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   9768 		sc->sc_flags |= WM_F_SGMII;
   9769 		mediatype = WM_MEDIATYPE_COPPER;
   9770 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   9771 		sc->sc_flags |= WM_F_SGMII;
   9772 		mediatype = WM_MEDIATYPE_SERDES;
   9773 	}
   9774 
   9775 out:
   9776 	/* Restore I2C interface setting */
   9777 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9778 
   9779 	return mediatype;
   9780 }
   9781 /*
   9782  * NVM related.
   9783  * Microwire, SPI (w/wo EERD) and Flash.
   9784  */
   9785 
   9786 /* Both spi and uwire */
   9787 
   9788 /*
   9789  * wm_eeprom_sendbits:
   9790  *
   9791  *	Send a series of bits to the EEPROM.
   9792  */
   9793 static void
   9794 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9795 {
   9796 	uint32_t reg;
   9797 	int x;
   9798 
   9799 	reg = CSR_READ(sc, WMREG_EECD);
   9800 
   9801 	for (x = nbits; x > 0; x--) {
   9802 		if (bits & (1U << (x - 1)))
   9803 			reg |= EECD_DI;
   9804 		else
   9805 			reg &= ~EECD_DI;
   9806 		CSR_WRITE(sc, WMREG_EECD, reg);
   9807 		CSR_WRITE_FLUSH(sc);
   9808 		delay(2);
   9809 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9810 		CSR_WRITE_FLUSH(sc);
   9811 		delay(2);
   9812 		CSR_WRITE(sc, WMREG_EECD, reg);
   9813 		CSR_WRITE_FLUSH(sc);
   9814 		delay(2);
   9815 	}
   9816 }
   9817 
   9818 /*
   9819  * wm_eeprom_recvbits:
   9820  *
   9821  *	Receive a series of bits from the EEPROM.
   9822  */
   9823 static void
   9824 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9825 {
   9826 	uint32_t reg, val;
   9827 	int x;
   9828 
   9829 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9830 
   9831 	val = 0;
   9832 	for (x = nbits; x > 0; x--) {
   9833 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9834 		CSR_WRITE_FLUSH(sc);
   9835 		delay(2);
   9836 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9837 			val |= (1U << (x - 1));
   9838 		CSR_WRITE(sc, WMREG_EECD, reg);
   9839 		CSR_WRITE_FLUSH(sc);
   9840 		delay(2);
   9841 	}
   9842 	*valp = val;
   9843 }
   9844 
   9845 /* Microwire */
   9846 
   9847 /*
   9848  * wm_nvm_read_uwire:
   9849  *
   9850  *	Read a word from the EEPROM using the MicroWire protocol.
   9851  */
static int
wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	/* One full MicroWire transaction per word. */
	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/*
		 * XXX: workaround for a bug in qemu-0.12.x and prior
		 * and Xen.
		 *
		 * We use this workaround only for 82540 because qemu's
		 * e1000 act as 82540.
		 */
		if (sc->sc_type == WM_T_82540) {
			/* Extra SK pulse to resynchronize the emulation. */
			reg |= EECD_SK;
			CSR_WRITE(sc, WMREG_EECD, reg);
			reg &= ~EECD_SK;
			CSR_WRITE(sc, WMREG_EECD, reg);
			CSR_WRITE_FLUSH(sc);
			delay(2);
		}
		/* XXX: end of workaround */

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		CSR_WRITE_FLUSH(sc);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		CSR_WRITE_FLUSH(sc);
		delay(2);
	}

	/* Always succeeds; the MicroWire protocol has no error reporting. */
	return 0;
}
   9905 
   9906 /* SPI */
   9907 
   9908 /*
   9909  * Set SPI and FLASH related information from the EECD register.
   9910  * For 82541 and 82547, the word size is taken from EEPROM.
   9911  */
   9912 static int
   9913 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9914 {
   9915 	int size;
   9916 	uint32_t reg;
   9917 	uint16_t data;
   9918 
   9919 	reg = CSR_READ(sc, WMREG_EECD);
   9920 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9921 
   9922 	/* Read the size of NVM from EECD by default */
   9923 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9924 	switch (sc->sc_type) {
   9925 	case WM_T_82541:
   9926 	case WM_T_82541_2:
   9927 	case WM_T_82547:
   9928 	case WM_T_82547_2:
   9929 		/* Set dummy value to access EEPROM */
   9930 		sc->sc_nvm_wordsize = 64;
   9931 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9932 		reg = data;
   9933 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9934 		if (size == 0)
   9935 			size = 6; /* 64 word size */
   9936 		else
   9937 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9938 		break;
   9939 	case WM_T_80003:
   9940 	case WM_T_82571:
   9941 	case WM_T_82572:
   9942 	case WM_T_82573: /* SPI case */
   9943 	case WM_T_82574: /* SPI case */
   9944 	case WM_T_82583: /* SPI case */
   9945 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9946 		if (size > 14)
   9947 			size = 14;
   9948 		break;
   9949 	case WM_T_82575:
   9950 	case WM_T_82576:
   9951 	case WM_T_82580:
   9952 	case WM_T_I350:
   9953 	case WM_T_I354:
   9954 	case WM_T_I210:
   9955 	case WM_T_I211:
   9956 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9957 		if (size > 15)
   9958 			size = 15;
   9959 		break;
   9960 	default:
   9961 		aprint_error_dev(sc->sc_dev,
   9962 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   9963 		return -1;
   9964 		break;
   9965 	}
   9966 
   9967 	sc->sc_nvm_wordsize = 1 << size;
   9968 
   9969 	return 0;
   9970 }
   9971 
   9972 /*
   9973  * wm_nvm_ready_spi:
   9974  *
   9975  *	Wait for a SPI EEPROM to be ready for commands.
   9976  */
   9977 static int
   9978 wm_nvm_ready_spi(struct wm_softc *sc)
   9979 {
   9980 	uint32_t val;
   9981 	int usec;
   9982 
   9983 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   9984 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   9985 		wm_eeprom_recvbits(sc, &val, 8);
   9986 		if ((val & SPI_SR_RDY) == 0)
   9987 			break;
   9988 	}
   9989 	if (usec >= SPI_MAX_RETRIES) {
   9990 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   9991 		return 1;
   9992 	}
   9993 	return 0;
   9994 }
   9995 
   9996 /*
   9997  * wm_nvm_read_spi:
   9998  *
 *	Read a word from the EEPROM using the SPI protocol.
   10000  */
static int
wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	CSR_WRITE_FLUSH(sc);
	delay(2);

	/* Wait for the EEPROM to finish any in-progress operation. */
	if (wm_nvm_ready_spi(sc))
		return 1;

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	CSR_WRITE_FLUSH(sc);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	CSR_WRITE_FLUSH(sc);
	delay(2);

	/*
	 * With 8 address bits the A8 opcode bit selects the upper half
	 * of the address space.
	 */
	opc = SPI_OPC_READ;
	if (sc->sc_nvm_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	/* Byte address = word address << 1 (16-bit words). */
	wm_eeprom_sendbits(sc, opc, 8);
	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);

	/* Sequential read: each word arrives byte-swapped; swap it back. */
	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	CSR_WRITE_FLUSH(sc);
	delay(2);

	return 0;
}
   10045 
   10046 /* Using with EERD */
   10047 
   10048 static int
   10049 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10050 {
   10051 	uint32_t attempts = 100000;
   10052 	uint32_t i, reg = 0;
   10053 	int32_t done = -1;
   10054 
   10055 	for (i = 0; i < attempts; i++) {
   10056 		reg = CSR_READ(sc, rw);
   10057 
   10058 		if (reg & EERD_DONE) {
   10059 			done = 0;
   10060 			break;
   10061 		}
   10062 		delay(5);
   10063 	}
   10064 
   10065 	return done;
   10066 }
   10067 
   10068 static int
   10069 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10070     uint16_t *data)
   10071 {
   10072 	int i, eerd = 0;
   10073 	int error = 0;
   10074 
   10075 	for (i = 0; i < wordcnt; i++) {
   10076 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10077 
   10078 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10079 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10080 		if (error != 0)
   10081 			break;
   10082 
   10083 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10084 	}
   10085 
   10086 	return error;
   10087 }
   10088 
   10089 /* Flash */
   10090 
/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Determine which of the two NVM flash banks holds the valid image
 *	and store it (0 or 1) in *bank.  Returns 0 on success, -1 when no
 *	valid bank can be identified.
 */
static int
wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
{
	uint32_t eecd;
	/* Byte offset of the signature byte within the signature word. */
	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
	/* Distance in bytes from the start of bank 0 to bank 1. */
	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
	uint8_t sig_byte = 0;

	switch (sc->sc_type) {
	case WM_T_PCH_SPT:
		/*
		 * In SPT, read from the CTRL_EXT reg instead of accessing the
		 * sector valid bits from the NVM.
		 */
		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
		if ((*bank == 0) || (*bank == 1)) {
			/* Field values 0 and 1 mean "no valid bank" on SPT. */
			aprint_error_dev(sc->sc_dev,
					 "%s: no valid NVM bank present\n",
				__func__);
			return -1;
		} else {
			/* Values >= 2 map to bank (value - 2). */
			*bank = *bank - 2;
			return 0;
		}
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* On ICH8/9 the EECD register may report the valid sector. */
		eecd = CSR_READ(sc, WMREG_EECD);
		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
			return 0;
		}
		/* FALLTHROUGH */
	default:
		/* Default to 0 */
		*bank = 0;

		/*
		 * Check bank 0.  NOTE(review): the return value of
		 * wm_read_ich8_byte() is ignored; a failed read leaves
		 * sig_byte at 0, which simply fails the signature test
		 * below and falls through to the next check.
		 */
		wm_read_ich8_byte(sc, act_offset, &sig_byte);
		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return 0;
		}

		/* Check bank 1 */
		wm_read_ich8_byte(sc, act_offset + bank1_offset,
		    &sig_byte);
		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return 0;
		}
	}

	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
		device_xname(sc->sc_dev)));
	return -1;
}
   10147 
   10148 /******************************************************************************
   10149  * This function does initial flash setup so that a new read/write/erase cycle
   10150  * can be started.
   10151  *
   10152  * sc - The pointer to the hw structure
   10153  ****************************************************************************/
   10154 static int32_t
   10155 wm_ich8_cycle_init(struct wm_softc *sc)
   10156 {
   10157 	uint16_t hsfsts;
   10158 	int32_t error = 1;
   10159 	int32_t i     = 0;
   10160 
   10161 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10162 
   10163 	/* May be check the Flash Des Valid bit in Hw status */
   10164 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10165 		return error;
   10166 	}
   10167 
   10168 	/* Clear FCERR in Hw status by writing 1 */
   10169 	/* Clear DAEL in Hw status by writing a 1 */
   10170 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10171 
   10172 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10173 
   10174 	/*
   10175 	 * Either we should have a hardware SPI cycle in progress bit to check
   10176 	 * against, in order to start a new cycle or FDONE bit should be
   10177 	 * changed in the hardware so that it is 1 after harware reset, which
   10178 	 * can then be used as an indication whether a cycle is in progress or
   10179 	 * has been completed .. we should also have some software semaphore
   10180 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   10181 	 * threads access to those bits can be sequentiallized or a way so that
   10182 	 * 2 threads dont start the cycle at the same time
   10183 	 */
   10184 
   10185 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10186 		/*
   10187 		 * There is no cycle running at present, so we can start a
   10188 		 * cycle
   10189 		 */
   10190 
   10191 		/* Begin by setting Flash Cycle Done. */
   10192 		hsfsts |= HSFSTS_DONE;
   10193 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10194 		error = 0;
   10195 	} else {
   10196 		/*
   10197 		 * otherwise poll for sometime so the current cycle has a
   10198 		 * chance to end before giving up.
   10199 		 */
   10200 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10201 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10202 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10203 				error = 0;
   10204 				break;
   10205 			}
   10206 			delay(1);
   10207 		}
   10208 		if (error == 0) {
   10209 			/*
   10210 			 * Successful in waiting for previous cycle to timeout,
   10211 			 * now set the Flash Cycle Done.
   10212 			 */
   10213 			hsfsts |= HSFSTS_DONE;
   10214 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10215 		}
   10216 	}
   10217 	return error;
   10218 }
   10219 
   10220 /******************************************************************************
   10221  * This function starts a flash cycle and waits for its completion
   10222  *
   10223  * sc - The pointer to the hw structure
   10224  ****************************************************************************/
   10225 static int32_t
   10226 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10227 {
   10228 	uint16_t hsflctl;
   10229 	uint16_t hsfsts;
   10230 	int32_t error = 1;
   10231 	uint32_t i = 0;
   10232 
   10233 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10234 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10235 	hsflctl |= HSFCTL_GO;
   10236 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10237 
   10238 	/* Wait till FDONE bit is set to 1 */
   10239 	do {
   10240 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10241 		if (hsfsts & HSFSTS_DONE)
   10242 			break;
   10243 		delay(1);
   10244 		i++;
   10245 	} while (i < timeout);
   10246 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   10247 		error = 0;
   10248 
   10249 	return error;
   10250 }
   10251 
   10252 /******************************************************************************
   10253  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10254  *
   10255  * sc - The pointer to the hw structure
   10256  * index - The index of the byte or word to read.
   10257  * size - Size of data to read, 1=byte 2=word, 4=dword
   10258  * data - Pointer to the word to store the value read.
   10259  *****************************************************************************/
   10260 static int32_t
   10261 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10262     uint32_t size, uint32_t *data)
   10263 {
   10264 	uint16_t hsfsts;
   10265 	uint16_t hsflctl;
   10266 	uint32_t flash_linear_address;
   10267 	uint32_t flash_data = 0;
   10268 	int32_t error = 1;
   10269 	int32_t count = 0;
   10270 
   10271 	if (size < 1  || size > 4 || data == 0x0 ||
   10272 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10273 		return error;
   10274 
   10275 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10276 	    sc->sc_ich8_flash_base;
   10277 
   10278 	do {
   10279 		delay(1);
   10280 		/* Steps */
   10281 		error = wm_ich8_cycle_init(sc);
   10282 		if (error)
   10283 			break;
   10284 
   10285 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10286 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   10287 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10288 		    & HSFCTL_BCOUNT_MASK;
   10289 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10290 		if (sc->sc_type == WM_T_PCH_SPT) {
   10291 			/*
   10292 			 * In SPT, This register is in Lan memory space, not
   10293 			 * flash. Therefore, only 32 bit access is supported.
   10294 			 */
   10295 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10296 			    (uint32_t)hsflctl);
   10297 		} else
   10298 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10299 
   10300 		/*
   10301 		 * Write the last 24 bits of index into Flash Linear address
   10302 		 * field in Flash Address
   10303 		 */
   10304 		/* TODO: TBD maybe check the index against the size of flash */
   10305 
   10306 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10307 
   10308 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10309 
   10310 		/*
   10311 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   10312 		 * the whole sequence a few more times, else read in (shift in)
   10313 		 * the Flash Data0, the order is least significant byte first
   10314 		 * msb to lsb
   10315 		 */
   10316 		if (error == 0) {
   10317 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10318 			if (size == 1)
   10319 				*data = (uint8_t)(flash_data & 0x000000FF);
   10320 			else if (size == 2)
   10321 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10322 			else if (size == 4)
   10323 				*data = (uint32_t)flash_data;
   10324 			break;
   10325 		} else {
   10326 			/*
   10327 			 * If we've gotten here, then things are probably
   10328 			 * completely hosed, but if the error condition is
   10329 			 * detected, it won't hurt to give it another try...
   10330 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10331 			 */
   10332 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10333 			if (hsfsts & HSFSTS_ERR) {
   10334 				/* Repeat for some time before giving up. */
   10335 				continue;
   10336 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10337 				break;
   10338 		}
   10339 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10340 
   10341 	return error;
   10342 }
   10343 
   10344 /******************************************************************************
   10345  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10346  *
   10347  * sc - pointer to wm_hw structure
   10348  * index - The index of the byte to read.
   10349  * data - Pointer to a byte to store the value read.
   10350  *****************************************************************************/
   10351 static int32_t
   10352 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10353 {
   10354 	int32_t status;
   10355 	uint32_t word = 0;
   10356 
   10357 	status = wm_read_ich8_data(sc, index, 1, &word);
   10358 	if (status == 0)
   10359 		*data = (uint8_t)word;
   10360 	else
   10361 		*data = 0;
   10362 
   10363 	return status;
   10364 }
   10365 
   10366 /******************************************************************************
   10367  * Reads a word from the NVM using the ICH8 flash access registers.
   10368  *
   10369  * sc - pointer to wm_hw structure
   10370  * index - The starting byte index of the word to read.
   10371  * data - Pointer to a word to store the value read.
   10372  *****************************************************************************/
   10373 static int32_t
   10374 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10375 {
   10376 	int32_t status;
   10377 	uint32_t word = 0;
   10378 
   10379 	status = wm_read_ich8_data(sc, index, 2, &word);
   10380 	if (status == 0)
   10381 		*data = (uint16_t)word;
   10382 	else
   10383 		*data = 0;
   10384 
   10385 	return status;
   10386 }
   10387 
   10388 /******************************************************************************
   10389  * Reads a dword from the NVM using the ICH8 flash access registers.
   10390  *
   10391  * sc - pointer to wm_hw structure
   10392  * index - The starting byte index of the word to read.
   10393  * data - Pointer to a word to store the value read.
   10394  *****************************************************************************/
   10395 static int32_t
   10396 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10397 {
   10398 	int32_t status;
   10399 
   10400 	status = wm_read_ich8_data(sc, index, 4, data);
   10401 	return status;
   10402 }
   10403 
   10404 /******************************************************************************
   10405  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10406  * register.
   10407  *
   10408  * sc - Struct containing variables accessed by shared code
   10409  * offset - offset of word in the EEPROM to read
   10410  * data - word read from the EEPROM
   10411  * words - number of words to read
   10412  *****************************************************************************/
   10413 static int
   10414 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10415 {
   10416 	int32_t  error = 0;
   10417 	uint32_t flash_bank = 0;
   10418 	uint32_t act_offset = 0;
   10419 	uint32_t bank_offset = 0;
   10420 	uint16_t word = 0;
   10421 	uint16_t i = 0;
   10422 
   10423 	/*
   10424 	 * We need to know which is the valid flash bank.  In the event
   10425 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10426 	 * managing flash_bank.  So it cannot be trusted and needs
   10427 	 * to be updated with each read.
   10428 	 */
   10429 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10430 	if (error) {
   10431 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10432 			device_xname(sc->sc_dev)));
   10433 		flash_bank = 0;
   10434 	}
   10435 
   10436 	/*
   10437 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10438 	 * size
   10439 	 */
   10440 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10441 
   10442 	error = wm_get_swfwhw_semaphore(sc);
   10443 	if (error) {
   10444 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10445 		    __func__);
   10446 		return error;
   10447 	}
   10448 
   10449 	for (i = 0; i < words; i++) {
   10450 		/* The NVM part needs a byte offset, hence * 2 */
   10451 		act_offset = bank_offset + ((offset + i) * 2);
   10452 		error = wm_read_ich8_word(sc, act_offset, &word);
   10453 		if (error) {
   10454 			aprint_error_dev(sc->sc_dev,
   10455 			    "%s: failed to read NVM\n", __func__);
   10456 			break;
   10457 		}
   10458 		data[i] = word;
   10459 	}
   10460 
   10461 	wm_put_swfwhw_semaphore(sc);
   10462 	return error;
   10463 }
   10464 
   10465 /******************************************************************************
   10466  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10467  * register.
   10468  *
   10469  * sc - Struct containing variables accessed by shared code
   10470  * offset - offset of word in the EEPROM to read
   10471  * data - word read from the EEPROM
   10472  * words - number of words to read
   10473  *****************************************************************************/
   10474 static int
   10475 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10476 {
   10477 	int32_t  error = 0;
   10478 	uint32_t flash_bank = 0;
   10479 	uint32_t act_offset = 0;
   10480 	uint32_t bank_offset = 0;
   10481 	uint32_t dword = 0;
   10482 	uint16_t i = 0;
   10483 
   10484 	/*
   10485 	 * We need to know which is the valid flash bank.  In the event
   10486 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10487 	 * managing flash_bank.  So it cannot be trusted and needs
   10488 	 * to be updated with each read.
   10489 	 */
   10490 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10491 	if (error) {
   10492 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10493 			device_xname(sc->sc_dev)));
   10494 		flash_bank = 0;
   10495 	}
   10496 
   10497 	/*
   10498 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10499 	 * size
   10500 	 */
   10501 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10502 
   10503 	error = wm_get_swfwhw_semaphore(sc);
   10504 	if (error) {
   10505 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10506 		    __func__);
   10507 		return error;
   10508 	}
   10509 
   10510 	for (i = 0; i < words; i++) {
   10511 		/* The NVM part needs a byte offset, hence * 2 */
   10512 		act_offset = bank_offset + ((offset + i) * 2);
   10513 		/* but we must read dword aligned, so mask ... */
   10514 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10515 		if (error) {
   10516 			aprint_error_dev(sc->sc_dev,
   10517 			    "%s: failed to read NVM\n", __func__);
   10518 			break;
   10519 		}
   10520 		/* ... and pick out low or high word */
   10521 		if ((act_offset & 0x2) == 0)
   10522 			data[i] = (uint16_t)(dword & 0xFFFF);
   10523 		else
   10524 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10525 	}
   10526 
   10527 	wm_put_swfwhw_semaphore(sc);
   10528 	return error;
   10529 }
   10530 
   10531 /* iNVM */
   10532 
   10533 static int
   10534 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10535 {
   10536 	int32_t  rv = 0;
   10537 	uint32_t invm_dword;
   10538 	uint16_t i;
   10539 	uint8_t record_type, word_address;
   10540 
   10541 	for (i = 0; i < INVM_SIZE; i++) {
   10542 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10543 		/* Get record type */
   10544 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10545 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10546 			break;
   10547 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10548 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10549 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10550 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10551 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10552 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10553 			if (word_address == address) {
   10554 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10555 				rv = 0;
   10556 				break;
   10557 			}
   10558 		}
   10559 	}
   10560 
   10561 	return rv;
   10562 }
   10563 
   10564 static int
   10565 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10566 {
   10567 	int rv = 0;
   10568 	int i;
   10569 
   10570 	for (i = 0; i < words; i++) {
   10571 		switch (offset + i) {
   10572 		case NVM_OFF_MACADDR:
   10573 		case NVM_OFF_MACADDR1:
   10574 		case NVM_OFF_MACADDR2:
   10575 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10576 			if (rv != 0) {
   10577 				data[i] = 0xffff;
   10578 				rv = -1;
   10579 			}
   10580 			break;
   10581 		case NVM_OFF_CFG2:
   10582 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10583 			if (rv != 0) {
   10584 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10585 				rv = 0;
   10586 			}
   10587 			break;
   10588 		case NVM_OFF_CFG4:
   10589 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10590 			if (rv != 0) {
   10591 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10592 				rv = 0;
   10593 			}
   10594 			break;
   10595 		case NVM_OFF_LED_1_CFG:
   10596 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10597 			if (rv != 0) {
   10598 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10599 				rv = 0;
   10600 			}
   10601 			break;
   10602 		case NVM_OFF_LED_0_2_CFG:
   10603 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10604 			if (rv != 0) {
   10605 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10606 				rv = 0;
   10607 			}
   10608 			break;
   10609 		case NVM_OFF_ID_LED_SETTINGS:
   10610 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10611 			if (rv != 0) {
   10612 				*data = ID_LED_RESERVED_FFFF;
   10613 				rv = 0;
   10614 			}
   10615 			break;
   10616 		default:
   10617 			DPRINTF(WM_DEBUG_NVM,
   10618 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10619 			*data = NVM_RESERVED_WORD;
   10620 			break;
   10621 		}
   10622 	}
   10623 
   10624 	return rv;
   10625 }
   10626 
   10627 /* Lock, detecting NVM type, validate checksum, version and read */
   10628 
   10629 /*
   10630  * wm_nvm_acquire:
   10631  *
   10632  *	Perform the EEPROM handshake required on some chips.
   10633  */
   10634 static int
   10635 wm_nvm_acquire(struct wm_softc *sc)
   10636 {
   10637 	uint32_t reg;
   10638 	int x;
   10639 	int ret = 0;
   10640 
   10641 	/* always success */
   10642 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10643 		return 0;
   10644 
   10645 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10646 		ret = wm_get_swfwhw_semaphore(sc);
   10647 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10648 		/* This will also do wm_get_swsm_semaphore() if needed */
   10649 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10650 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10651 		ret = wm_get_swsm_semaphore(sc);
   10652 	}
   10653 
   10654 	if (ret) {
   10655 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10656 			__func__);
   10657 		return 1;
   10658 	}
   10659 
   10660 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10661 		reg = CSR_READ(sc, WMREG_EECD);
   10662 
   10663 		/* Request EEPROM access. */
   10664 		reg |= EECD_EE_REQ;
   10665 		CSR_WRITE(sc, WMREG_EECD, reg);
   10666 
   10667 		/* ..and wait for it to be granted. */
   10668 		for (x = 0; x < 1000; x++) {
   10669 			reg = CSR_READ(sc, WMREG_EECD);
   10670 			if (reg & EECD_EE_GNT)
   10671 				break;
   10672 			delay(5);
   10673 		}
   10674 		if ((reg & EECD_EE_GNT) == 0) {
   10675 			aprint_error_dev(sc->sc_dev,
   10676 			    "could not acquire EEPROM GNT\n");
   10677 			reg &= ~EECD_EE_REQ;
   10678 			CSR_WRITE(sc, WMREG_EECD, reg);
   10679 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10680 				wm_put_swfwhw_semaphore(sc);
   10681 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10682 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10683 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10684 				wm_put_swsm_semaphore(sc);
   10685 			return 1;
   10686 		}
   10687 	}
   10688 
   10689 	return 0;
   10690 }
   10691 
   10692 /*
   10693  * wm_nvm_release:
   10694  *
   10695  *	Release the EEPROM mutex.
   10696  */
   10697 static void
   10698 wm_nvm_release(struct wm_softc *sc)
   10699 {
   10700 	uint32_t reg;
   10701 
   10702 	/* always success */
   10703 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10704 		return;
   10705 
   10706 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10707 		reg = CSR_READ(sc, WMREG_EECD);
   10708 		reg &= ~EECD_EE_REQ;
   10709 		CSR_WRITE(sc, WMREG_EECD, reg);
   10710 	}
   10711 
   10712 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10713 		wm_put_swfwhw_semaphore(sc);
   10714 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10715 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10716 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10717 		wm_put_swsm_semaphore(sc);
   10718 }
   10719 
   10720 static int
   10721 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10722 {
   10723 	uint32_t eecd = 0;
   10724 
   10725 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10726 	    || sc->sc_type == WM_T_82583) {
   10727 		eecd = CSR_READ(sc, WMREG_EECD);
   10728 
   10729 		/* Isolate bits 15 & 16 */
   10730 		eecd = ((eecd >> 15) & 0x03);
   10731 
   10732 		/* If both bits are set, device is Flash type */
   10733 		if (eecd == 0x03)
   10734 			return 0;
   10735 	}
   10736 	return 1;
   10737 }
   10738 
   10739 static int
   10740 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10741 {
   10742 	uint32_t eec;
   10743 
   10744 	eec = CSR_READ(sc, WMREG_EEC);
   10745 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10746 		return 1;
   10747 
   10748 	return 0;
   10749 }
   10750 
   10751 /*
   10752  * wm_nvm_validate_checksum
   10753  *
   10754  * The checksum is defined as the sum of the first 64 (16 bit) words.
   10755  */
   10756 static int
   10757 wm_nvm_validate_checksum(struct wm_softc *sc)
   10758 {
   10759 	uint16_t checksum;
   10760 	uint16_t eeprom_data;
   10761 #ifdef WM_DEBUG
   10762 	uint16_t csum_wordaddr, valid_checksum;
   10763 #endif
   10764 	int i;
   10765 
   10766 	checksum = 0;
   10767 
   10768 	/* Don't check for I211 */
   10769 	if (sc->sc_type == WM_T_I211)
   10770 		return 0;
   10771 
   10772 #ifdef WM_DEBUG
   10773 	if (sc->sc_type == WM_T_PCH_LPT) {
   10774 		csum_wordaddr = NVM_OFF_COMPAT;
   10775 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10776 	} else {
   10777 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10778 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10779 	}
   10780 
   10781 	/* Dump EEPROM image for debug */
   10782 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10783 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10784 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10785 		/* XXX PCH_SPT? */
   10786 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10787 		if ((eeprom_data & valid_checksum) == 0) {
   10788 			DPRINTF(WM_DEBUG_NVM,
   10789 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10790 				device_xname(sc->sc_dev), eeprom_data,
   10791 				    valid_checksum));
   10792 		}
   10793 	}
   10794 
   10795 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10796 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10797 		for (i = 0; i < NVM_SIZE; i++) {
   10798 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10799 				printf("XXXX ");
   10800 			else
   10801 				printf("%04hx ", eeprom_data);
   10802 			if (i % 8 == 7)
   10803 				printf("\n");
   10804 		}
   10805 	}
   10806 
   10807 #endif /* WM_DEBUG */
   10808 
   10809 	for (i = 0; i < NVM_SIZE; i++) {
   10810 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10811 			return 1;
   10812 		checksum += eeprom_data;
   10813 	}
   10814 
   10815 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10816 #ifdef WM_DEBUG
   10817 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10818 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10819 #endif
   10820 	}
   10821 
   10822 	return 0;
   10823 }
   10824 
   10825 static void
   10826 wm_nvm_version_invm(struct wm_softc *sc)
   10827 {
   10828 	uint32_t dword;
   10829 
   10830 	/*
   10831 	 * Linux's code to decode version is very strange, so we don't
   10832 	 * obey that algorithm and just use word 61 as the document.
   10833 	 * Perhaps it's not perfect though...
   10834 	 *
   10835 	 * Example:
   10836 	 *
   10837 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10838 	 */
   10839 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10840 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10841 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10842 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10843 }
   10844 
/*
 * wm_nvm_version:
 *
 *	Decode and (verbosely) print the NVM image version, option ROM
 *	version and Image Unique ID.  Fills in sc_nvm_ver_major/minor
 *	(and sc_nvm_ver_build when the chip encodes one).
 */
static void
wm_nvm_version(struct wm_softc *sc)
{
	uint16_t major, minor, build, patch;
	uint16_t uid0, uid1;
	uint16_t nvm_data;
	uint16_t off;
	bool check_version = false;
	bool check_optionrom = false;
	bool have_build = false;

	/*
	 * Version format:
	 *
	 * XYYZ
	 * X0YZ
	 * X0YY
	 *
	 * Example:
	 *
	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
	 *	82571	0x50a6	5.10.6?
	 *	82572	0x506a	5.6.10?
	 *	82572EI	0x5069	5.6.9?
	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's default vaule)
	 */
	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82583:
		check_version = true;
		check_optionrom = true;
		have_build = true;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
		/* Only decode the version when the UID word isn't valid. */
		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
			check_version = true;
		break;
	case WM_T_I211:
		/* I211 always uses iNVM; jump into the print path below. */
		wm_nvm_version_invm(sc);
		goto printver;
	case WM_T_I210:
		if (!wm_nvm_get_flash_presence_i210(sc)) {
			wm_nvm_version_invm(sc);
			goto printver;
		}
		/* FALLTHROUGH */
	case WM_T_I350:
	case WM_T_I354:
		check_version = true;
		check_optionrom = true;
		break;
	default:
		return;
	}
	if (check_version) {
		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
			build = nvm_data & NVM_BUILD_MASK;
			have_build = true;
		} else
			minor = nvm_data & 0x00ff;

		/* Decimal */
		minor = (minor / 16) * 10 + (minor % 16);
		sc->sc_nvm_ver_major = major;
		sc->sc_nvm_ver_minor = minor;

		/*
		 * NOTE: this label is jumped to from the iNVM cases above,
		 * entering the middle of this if-block (legal C); on those
		 * paths have_build is false, so 'build' (set only above)
		 * is never read uninitialized.
		 */
printver:
		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
		    sc->sc_nvm_ver_minor);
		if (have_build) {
			sc->sc_nvm_ver_build = build;
			aprint_verbose(".%d", build);
		}
	}
	if (check_optionrom) {
		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
		/* Option ROM Version */
		if ((off != 0x0000) && (off != 0xffff)) {
			off += NVM_COMBO_VER_OFF;
			wm_nvm_read(sc, off + 1, 1, &uid1);
			wm_nvm_read(sc, off, 1, &uid0);
			if ((uid0 != 0) && (uid0 != 0xffff)
			    && (uid1 != 0) && (uid1 != 0xffff)) {
				/* 16bits */
				major = uid0 >> 8;
				build = (uid0 << 8) | (uid1 >> 8);
				patch = uid1 & 0x00ff;
				aprint_verbose(", option ROM Version %d.%d.%d",
				    major, build, patch);
			}
		}
	}

	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
}
   10951 
   10952 /*
   10953  * wm_nvm_read:
   10954  *
   10955  *	Read data from the serial EEPROM.
   10956  */
   10957 static int
   10958 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10959 {
   10960 	int rv;
   10961 
   10962 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10963 		return 1;
   10964 
   10965 	if (wm_nvm_acquire(sc))
   10966 		return 1;
   10967 
   10968 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10969 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10970 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   10971 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   10972 	else if (sc->sc_type == WM_T_PCH_SPT)
   10973 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   10974 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   10975 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   10976 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   10977 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   10978 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   10979 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   10980 	else
   10981 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   10982 
   10983 	wm_nvm_release(sc);
   10984 	return rv;
   10985 }
   10986 
   10987 /*
   10988  * Hardware semaphores.
   10989  * Very complexed...
   10990  */
   10991 
/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the SWSM semaphore: first the software semaphore bit
 *	(SMBI, only on WM_F_LOCK_SWSM chips), then the software/firmware
 *	semaphore bit (SWESMBI).  Returns 0 on success, 1 on timeout
 *	(with any partially-acquired state released).
 */
static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	if (sc->sc_flags & WM_F_LOCK_SWSM) {
		/* Get the SW semaphore. */
		timeout = sc->sc_nvm_wordsize + 1;
		while (timeout) {
			swsm = CSR_READ(sc, WMREG_SWSM);

			/* SMBI clear means the semaphore is free. */
			if ((swsm & SWSM_SMBI) == 0)
				break;

			delay(50);
			timeout--;
		}

		if (timeout == 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not acquire SWSM SMBI\n");
			return 1;
		}
	}

	/* Get the FW semaphore. */
	timeout = sc->sc_nvm_wordsize + 1;
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* If we managed to set the bit we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not acquire SWSM SWESMBI\n");
		/* Release semaphores */
		wm_put_swsm_semaphore(sc);
		return 1;
	}
	return 0;
}
   11042 
   11043 static void
   11044 wm_put_swsm_semaphore(struct wm_softc *sc)
   11045 {
   11046 	uint32_t swsm;
   11047 
   11048 	swsm = CSR_READ(sc, WMREG_SWSM);
   11049 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11050 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11051 }
   11052 
   11053 static int
   11054 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11055 {
   11056 	uint32_t swfw_sync;
   11057 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11058 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   11059 	int timeout = 200;
   11060 
   11061 	for (timeout = 0; timeout < 200; timeout++) {
   11062 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11063 			if (wm_get_swsm_semaphore(sc)) {
   11064 				aprint_error_dev(sc->sc_dev,
   11065 				    "%s: failed to get semaphore\n",
   11066 				    __func__);
   11067 				return 1;
   11068 			}
   11069 		}
   11070 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11071 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11072 			swfw_sync |= swmask;
   11073 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11074 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11075 				wm_put_swsm_semaphore(sc);
   11076 			return 0;
   11077 		}
   11078 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11079 			wm_put_swsm_semaphore(sc);
   11080 		delay(5000);
   11081 	}
   11082 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11083 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11084 	return 1;
   11085 }
   11086 
   11087 static void
   11088 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11089 {
   11090 	uint32_t swfw_sync;
   11091 
   11092 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11093 		while (wm_get_swsm_semaphore(sc) != 0)
   11094 			continue;
   11095 	}
   11096 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11097 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11098 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11099 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11100 		wm_put_swsm_semaphore(sc);
   11101 }
   11102 
   11103 static int
   11104 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11105 {
   11106 	uint32_t ext_ctrl;
   11107 	int timeout = 200;
   11108 
   11109 	for (timeout = 0; timeout < 200; timeout++) {
   11110 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11111 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11112 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11113 
   11114 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11115 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11116 			return 0;
   11117 		delay(5000);
   11118 	}
   11119 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11120 	    device_xname(sc->sc_dev), ext_ctrl);
   11121 	return 1;
   11122 }
   11123 
   11124 static void
   11125 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11126 {
   11127 	uint32_t ext_ctrl;
   11128 
   11129 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11130 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11131 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11132 }
   11133 
/*
 * wm_get_hw_semaphore_82573:
 *
 *	Acquire MDIO software ownership through EXTCNFCTR on the 82573
 *	family.  Returns 0 on success, -1 on timeout.
 */
static int
wm_get_hw_semaphore_82573(struct wm_softc *sc)
{
	int i = 0;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
	do {
		CSR_WRITE(sc, WMREG_EXTCNFCTR,
		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
		/* The bit reads back as set only once ownership is granted. */
		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
			break;
		delay(2*1000);
		i++;
	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);

	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
		/* Release the (possibly partially set) ownership bit. */
		wm_put_hw_semaphore_82573(sc);
		log(LOG_ERR, "%s: Driver can't access the PHY\n",
		    device_xname(sc->sc_dev));
		return -1;
	}

	return 0;
}
   11160 
   11161 static void
   11162 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11163 {
   11164 	uint32_t reg;
   11165 
   11166 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11167 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11168 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11169 }
   11170 
   11171 /*
   11172  * Management mode and power management related subroutines.
   11173  * BMC, AMT, suspend/resume and EEE.
   11174  */
   11175 
   11176 #ifdef WM_WOL
/*
 * wm_check_mng_mode:
 *
 *	Return non-zero if the manageability (BMC/AMT) mode is enabled,
 *	dispatching to the chip-family specific check.
 */
static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
	case WM_T_82574:
	case WM_T_82583:
		rv = wm_check_mng_mode_82574(sc);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	return rv;
}
   11210 
   11211 static int
   11212 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11213 {
   11214 	uint32_t fwsm;
   11215 
   11216 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11217 
   11218 	if (((fwsm & FWSM_FW_VALID) != 0)
   11219 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11220 		return 1;
   11221 
   11222 	return 0;
   11223 }
   11224 
   11225 static int
   11226 wm_check_mng_mode_82574(struct wm_softc *sc)
   11227 {
   11228 	uint16_t data;
   11229 
   11230 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11231 
   11232 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11233 		return 1;
   11234 
   11235 	return 0;
   11236 }
   11237 
   11238 static int
   11239 wm_check_mng_mode_generic(struct wm_softc *sc)
   11240 {
   11241 	uint32_t fwsm;
   11242 
   11243 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11244 
   11245 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11246 		return 1;
   11247 
   11248 	return 0;
   11249 }
   11250 #endif /* WM_WOL */
   11251 
/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management pass-through (forwarding management
 *	packets between the BMC and the host) should be enabled, 0
 *	otherwise.
 */
static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	/* No ASF firmware means no manageability. */
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
		device_xname(sc->sc_dev), manc));
	/* TCO packet reception must be enabled for pass-through. */
	if ((manc & MANC_RECV_TCO_EN) == 0)
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		/* Manageability clock not gated, and firmware in IAMT mode */
		if (((factps & FACTPS_MNGCG) == 0)
		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
		uint16_t data;

		/* 82574/82583 keep the management mode in NVM word CFG2. */
		factps = CSR_READ(sc, WMREG_FACTPS);
		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
			device_xname(sc->sc_dev), factps, data));
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((data & NVM_CFG2_MNGM_MASK)
			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		return 1;

	return 0;
}
   11290 
   11291 static bool
   11292 wm_phy_resetisblocked(struct wm_softc *sc)
   11293 {
   11294 	bool blocked = false;
   11295 	uint32_t reg;
   11296 	int i = 0;
   11297 
   11298 	switch (sc->sc_type) {
   11299 	case WM_T_ICH8:
   11300 	case WM_T_ICH9:
   11301 	case WM_T_ICH10:
   11302 	case WM_T_PCH:
   11303 	case WM_T_PCH2:
   11304 	case WM_T_PCH_LPT:
   11305 	case WM_T_PCH_SPT:
   11306 		do {
   11307 			reg = CSR_READ(sc, WMREG_FWSM);
   11308 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11309 				blocked = true;
   11310 				delay(10*1000);
   11311 				continue;
   11312 			}
   11313 			blocked = false;
   11314 		} while (blocked && (i++ < 10));
   11315 		return blocked;
   11316 		break;
   11317 	case WM_T_82571:
   11318 	case WM_T_82572:
   11319 	case WM_T_82573:
   11320 	case WM_T_82574:
   11321 	case WM_T_82583:
   11322 	case WM_T_80003:
   11323 		reg = CSR_READ(sc, WMREG_MANC);
   11324 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11325 			return true;
   11326 		else
   11327 			return false;
   11328 		break;
   11329 	default:
   11330 		/* no problem */
   11331 		break;
   11332 	}
   11333 
   11334 	return false;
   11335 }
   11336 
   11337 static void
   11338 wm_get_hw_control(struct wm_softc *sc)
   11339 {
   11340 	uint32_t reg;
   11341 
   11342 	switch (sc->sc_type) {
   11343 	case WM_T_82573:
   11344 		reg = CSR_READ(sc, WMREG_SWSM);
   11345 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11346 		break;
   11347 	case WM_T_82571:
   11348 	case WM_T_82572:
   11349 	case WM_T_82574:
   11350 	case WM_T_82583:
   11351 	case WM_T_80003:
   11352 	case WM_T_ICH8:
   11353 	case WM_T_ICH9:
   11354 	case WM_T_ICH10:
   11355 	case WM_T_PCH:
   11356 	case WM_T_PCH2:
   11357 	case WM_T_PCH_LPT:
   11358 	case WM_T_PCH_SPT:
   11359 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11360 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11361 		break;
   11362 	default:
   11363 		break;
   11364 	}
   11365 }
   11366 
   11367 static void
   11368 wm_release_hw_control(struct wm_softc *sc)
   11369 {
   11370 	uint32_t reg;
   11371 
   11372 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11373 		return;
   11374 
   11375 	if (sc->sc_type == WM_T_82573) {
   11376 		reg = CSR_READ(sc, WMREG_SWSM);
   11377 		reg &= ~SWSM_DRV_LOAD;
   11378 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   11379 	} else {
   11380 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11381 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11382 	}
   11383 }
   11384 
   11385 static void
   11386 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11387 {
   11388 	uint32_t reg;
   11389 
   11390 	if (sc->sc_type < WM_T_PCH2)
   11391 		return;
   11392 
   11393 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11394 
   11395 	if (gate)
   11396 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11397 	else
   11398 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11399 
   11400 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11401 }
   11402 
/*
 * wm_smbustopci:
 *
 *	Switch the PHY interface from SMBus back to PCIe by toggling
 *	LANPHYPC, but only when no valid firmware is present and the
 *	PHY reset is not blocked.  The delays and flushes below are
 *	part of the hardware sequence; do not reorder.
 */
static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm, reg;

	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
	wm_gate_hw_phy_config_ich8lan(sc, true);

	/* Acquire semaphore */
	wm_get_swfwhw_semaphore(sc);

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && ((wm_phy_resetisblocked(sc) == false))) {
		if (sc->sc_type >= WM_T_PCH_LPT) {
			/*
			 * NOTE(review): LPT+ appears to need SMBus mode
			 * forced before the toggle; confirm against the
			 * Intel reference driver.
			 */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg |= CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
			CSR_WRITE_FLUSH(sc);
			delay(50*1000);
		}

		/* Toggle LANPHYPC */
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(50*1000);

		if (sc->sc_type >= WM_T_PCH_LPT) {
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}

	/* Release semaphore */
	wm_put_swfwhw_semaphore(sc);

	/*
	 * Ungate automatic PHY configuration by hardware on non-managed 82579
	 */
	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
		wm_gate_hw_phy_config_ich8lan(sc, false);
}
   11452 
   11453 static void
   11454 wm_init_manageability(struct wm_softc *sc)
   11455 {
   11456 
   11457 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11458 		device_xname(sc->sc_dev), __func__));
   11459 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11460 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11461 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11462 
   11463 		/* Disable hardware interception of ARP */
   11464 		manc &= ~MANC_ARP_EN;
   11465 
   11466 		/* Enable receiving management packets to the host */
   11467 		if (sc->sc_type >= WM_T_82571) {
   11468 			manc |= MANC_EN_MNG2HOST;
   11469 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
   11470 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11471 		}
   11472 
   11473 		CSR_WRITE(sc, WMREG_MANC, manc);
   11474 	}
   11475 }
   11476 
   11477 static void
   11478 wm_release_manageability(struct wm_softc *sc)
   11479 {
   11480 
   11481 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11482 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11483 
   11484 		manc |= MANC_ARP_EN;
   11485 		if (sc->sc_type >= WM_T_82571)
   11486 			manc &= ~MANC_EN_MNG2HOST;
   11487 
   11488 		CSR_WRITE(sc, WMREG_MANC, manc);
   11489 	}
   11490 }
   11491 
/*
 * wm_get_wakeup:
 *
 *	Probe manageability/wakeup capabilities and record them in
 *	sc_flags (WM_F_HAS_AMT, WM_F_ARC_SUBSYS_VALID,
 *	WM_F_ASF_FIRMWARE_PRES, WM_F_HAS_MANAGE).
 */
static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
		/* A non-zero FWSM mode field implies the ARC subsystem. */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flag is set after the EEPROM-related state
	 * has been reset.
	 */
}
   11552 
   11553 #ifdef WM_WOL
   11554 /* WOL in the newer chipset interfaces (pchlan) */
/*
 * wm_enable_phy_wakeup:
 *
 *	WOL setup via PHY registers for PCH (82577).  Unimplemented:
 *	the entire body is compiled out, so this is currently a no-op
 *	stub; the comments below outline the intended steps.
 */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}
   11576 
   11577 /* Power down workaround on D3 */
/*
 * wm_igp3_phy_powerdown_workaround_ich8lan:
 *
 *	Power down workaround on D3 for IGP3 PHYs: try (at most twice)
 *	to put the PHY's voltage regulator into shutdown mode, issuing
 *	a PHY reset between attempts.
 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		/* Done if shutdown took effect, or if this was the retry. */
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
   11613 
/*
 * wm_enable_wakeup:
 *
 *	Arm the device for Wake On LAN: advertise wake capability,
 *	apply per-chip workarounds, program the wakeup filter (magic
 *	packet), and request PME through the PCI power management
 *	registers.
 */
static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	/* Without a PCI power management capability we cannot wake. */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if (child->mii_mpd_rev <= 2)
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	/* Wake on magic packet (at minimum). */
	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		/* PCH programs wakeup through PHY registers instead. */
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		|| (sc->sc_type == WM_T_PCH2))
		    && (sc->sc_phytype == WMPHY_IGP_3))
			wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
   11698 #endif /* WM_WOL */
   11699 
   11700 /* LPLU */
   11701 
   11702 static void
   11703 wm_lplu_d0_disable(struct wm_softc *sc)
   11704 {
   11705 	uint32_t reg;
   11706 
   11707 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11708 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   11709 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11710 }
   11711 
   11712 static void
   11713 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   11714 {
   11715 	uint32_t reg;
   11716 
   11717 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   11718 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   11719 	reg |= HV_OEM_BITS_ANEGNOW;
   11720 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   11721 }
   11722 
   11723 /* EEE */
   11724 
   11725 static void
   11726 wm_set_eee_i350(struct wm_softc *sc)
   11727 {
   11728 	uint32_t ipcnfg, eeer;
   11729 
   11730 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11731 	eeer = CSR_READ(sc, WMREG_EEER);
   11732 
   11733 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11734 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11735 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11736 		    | EEER_LPI_FC);
   11737 	} else {
   11738 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11739 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11740 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11741 		    | EEER_LPI_FC);
   11742 	}
   11743 
   11744 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11745 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11746 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11747 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11748 }
   11749 
   11750 /*
   11751  * Workarounds (mainly PHY related).
   11752  * Basically, PHY's workarounds are in the PHY drivers.
   11753  */
   11754 
   11755 /* Work-around for 82566 Kumeran PCS lock loss */
/*
 * wm_kmrn_lock_loss_workaround_ich8lan:
 *
 *	Work-around for the 82566 Kumeran PCS lock loss.  Currently
 *	compiled out (#if 0), so this is a no-op; the disabled body
 *	shows the intended sequence.
 */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
#if 0
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link is other than 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

out:
	return;
#endif
}
   11802 
   11803 /* WOL from S5 stops working */
   11804 static void
   11805 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11806 {
   11807 	uint16_t kmrn_reg;
   11808 
   11809 	/* Only for igp3 */
   11810 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11811 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11812 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11813 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11814 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11815 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11816 	}
   11817 }
   11818 
   11819 /*
   11820  * Workaround for pch's PHYs
   11821  * XXX should be moved to new PHY driver?
   11822  */
/*
 * wm_hv_phy_workaround_ich8lan:
 *
 *	PHY workarounds for PCH (82577/82578).  Several steps are
 *	intentionally unimplemented; the placeholder comments below
 *	mark them.
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}
   11859 
/*
 * wm_lv_phy_workaround_ich8lan:
 *
 *	PHY workaround — presumably for PCH2/82579 ("lv"); confirm with
 *	the caller.  Only switches MDIO accesses to slow mode.
 */
static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}
   11866 
/*
 * wm_k1_gig_workaround_hv:
 *
 *	K1 (power state) workaround for PCH.  When "link" is non-zero
 *	K1 is forced off; otherwise the NVM-configured K1 setting is
 *	applied.  Also writes the link-stall fix value to the KMRN
 *	diagnostic register.
 */
static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		/* Force K1 off while link is up. */
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}
   11888 
   11889 static void
   11890 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   11891 {
   11892 	uint32_t reg;
   11893 
   11894 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   11895 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   11896 	    reg | HV_KMRN_MDIO_SLOW);
   11897 }
   11898 
/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable the K1 power state through the KMRN
 *	interface, then briefly force speed via CTRL/CTRL_EXT so the
 *	change takes effect.  The delays/flushes are part of the
 *	hardware sequence; do not reorder.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	/* Save CTRL/CTRL_EXT so they can be restored below. */
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	/* Clear the speed-select bits and force speed with bypass. */
	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	/* Restore the original register values. */
	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}
   11932 
   11933 /* special case - for 82575 - need to do manual init ... */
/*
 * wm_reset_init_script_82575:
 *
 *	Manual post-reset initialization for the 82575 (only needed on
 *	boards without an EEPROM).  The register/value pairs are fixed
 *	magic taken from the reference driver; do not reorder.
 */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * remark: this is untested code - we have no board without EEPROM
	 *  same setup as mentioned int the FreeBSD driver for the i82575
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}
   11963 
   11964 static void
   11965 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   11966 {
   11967 	uint32_t reg;
   11968 	uint16_t nvmword;
   11969 	int rv;
   11970 
   11971 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11972 		return;
   11973 
   11974 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   11975 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   11976 	if (rv != 0) {
   11977 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   11978 		    __func__);
   11979 		return;
   11980 	}
   11981 
   11982 	reg = CSR_READ(sc, WMREG_MDICNFG);
   11983 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   11984 		reg |= MDICNFG_DEST;
   11985 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   11986 		reg |= MDICNFG_COM_MDIO;
   11987 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   11988 }
   11989 
   11990 /*
   11991  * I210 Errata 25 and I211 Errata 10
   11992  * Slow System Clock.
   11993  */
   11994 static void
   11995 wm_pll_workaround_i210(struct wm_softc *sc)
   11996 {
   11997 	uint32_t mdicnfg, wuc;
   11998 	uint32_t reg;
   11999 	pcireg_t pcireg;
   12000 	uint32_t pmreg;
   12001 	uint16_t nvmword, tmp_nvmword;
   12002 	int phyval;
   12003 	bool wa_done = false;
   12004 	int i;
   12005 
   12006 	/* Save WUC and MDICNFG registers */
   12007 	wuc = CSR_READ(sc, WMREG_WUC);
   12008 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12009 
   12010 	reg = mdicnfg & ~MDICNFG_DEST;
   12011 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12012 
   12013 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12014 		nvmword = INVM_DEFAULT_AL;
   12015 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12016 
   12017 	/* Get Power Management cap offset */
   12018 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12019 		&pmreg, NULL) == 0)
   12020 		return;
   12021 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12022 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12023 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12024 
   12025 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12026 			break; /* OK */
   12027 		}
   12028 
   12029 		wa_done = true;
   12030 		/* Directly reset the internal PHY */
   12031 		reg = CSR_READ(sc, WMREG_CTRL);
   12032 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12033 
   12034 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12035 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12036 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12037 
   12038 		CSR_WRITE(sc, WMREG_WUC, 0);
   12039 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12040 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12041 
   12042 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12043 		    pmreg + PCI_PMCSR);
   12044 		pcireg |= PCI_PMCSR_STATE_D3;
   12045 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12046 		    pmreg + PCI_PMCSR, pcireg);
   12047 		delay(1000);
   12048 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12049 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12050 		    pmreg + PCI_PMCSR, pcireg);
   12051 
   12052 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12053 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12054 
   12055 		/* Restore WUC register */
   12056 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12057 	}
   12058 
   12059 	/* Restore MDICNFG setting */
   12060 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12061 	if (wa_done)
   12062 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12063 }
   12064