/*	$NetBSD: if_wm.c,v 1.406 2016/05/19 08:35:03 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.406 2016/05/19 08:35:03 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
#define	WM_DEBUG_INIT		0x40
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
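
/*
 * Illustrative, non-compiled sketch only (sc and reg are assumed
 * context): DPRINTF() takes a debug class as its first argument and a
 * fully parenthesized printf argument list as its second, so a
 * GMII-class debug message would look like this:
 */
#if 0
	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: reading PHY register %d\n",
	    device_xname(sc->sc_dev), reg));
#endif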

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this driver supports.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

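/*
 * Illustrative, non-compiled sketch only (txq is assumed context): the
 * descriptor counts above are powers of two, so the ring indices wrap
 * with a cheap AND instead of a modulo.
 */
#if 0
	int idx;

	idx = WM_NTXDESC(txq) - 1;	/* last Tx descriptor ... */
	idx = WM_NEXTTX(txq, idx);	/* ... advances (wraps) to 0 */
	idx = WM_NRXDESC - 1;		/* last Rx descriptor ... */
	idx = WM_NEXTRX(idx);		/* ... advances (wraps) to 0 */
#endif
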
typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

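/*
 * Illustrative, non-compiled sketch only (sc is assumed context, and a
 * zero return from wm_get_swfw_semaphore() is assumed to mean success):
 * the table above maps a MAC function number to its SWFW PHY semaphore
 * bit, so guarded PHY access would look like this:
 */
#if 0
	uint16_t sem = swfwphysem[sc->sc_funcid];

	if (wm_get_swfw_semaphore(sc, sem) == 0) {
		/* ... access the PHY ... */
		wm_put_swfw_semaphore(sc, sem);
	}
#endif
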
static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This intermediate queue lets those CPUs hand
	 * off packets without blocking (see the sketch after this
	 * struct).
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	/* XXX which event counter is required? */
};

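/*
 * Illustrative, non-compiled sketch only (txq and m are assumed
 * context): txq_interq is a pcq(9), so producers on any CPU can enqueue
 * without taking txq_lock, and the consumer drains it while holding the
 * lock.
 */
#if 0
	/* Producer side (any CPU): */
	if (!pcq_put(txq->txq_interq, m))
		m_freem(m);		/* intermediate queue is full */

	/* Consumer side (with txq_lock held): */
	while ((m = pcq_get(txq->txq_interq)) != NULL) {
		/* ... load m onto the hardware descriptor ring ... */
	}
#endif
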
struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index into the MSI-X table */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index into the MSI-X table */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */
};

#define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define WM_TX_TRYLOCK(_txq)	((_txq)->txq_lock == NULL || mutex_tryenter((_txq)->txq_lock))
#define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

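/*
 * Illustrative, non-compiled sketch only (txq, sc and ifp are assumed
 * context): the lock macros above degrade to no-ops when the
 * corresponding lock pointer is NULL (the non-MPSAFE case), so callers
 * can bracket queue work unconditionally:
 */
#if 0
	WM_TX_LOCK(txq);
	if (!sc->sc_stopping)
		wm_start_locked(ifp);
	WM_TX_UNLOCK(txq);
#endif
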
#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

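/*
 * Illustrative, non-compiled sketch only (rxq, m and n are assumed
 * context): WM_RXCHAIN_LINK() appends through the tail pointer in O(1),
 * so a multi-buffer packet is reassembled with one reset followed by
 * one link per received fragment:
 */
#if 0
	WM_RXCHAIN_RESET(rxq);
	WM_RXCHAIN_LINK(rxq, m);	/* first fragment */
	WM_RXCHAIN_LINK(rxq, n);	/* subsequent fragment */
#endif
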
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

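/*
 * Illustrative, non-compiled sketch only (sc is assumed context;
 * register and bit names as used elsewhere in this driver):
 * CSR_WRITE_FLUSH() pushes posted PCI writes out to the device by
 * reading STATUS, the usual idiom when a write must take effect before
 * a timed wait:
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	CSR_WRITE_FLUSH(sc);
	delay(100);
#endif
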
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

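/*
 * Illustrative, non-compiled sketch only (sc and txq are assumed
 * context; register names as used elsewhere in this driver): a
 * descriptor ring may sit above 4GB, so its DMA address is programmed
 * as a LO/HI register pair; the _HI macros above collapse to 0 when
 * bus_addr_t is 32 bits:
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
#endif
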
/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using the EERD register */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};
   1340 
   1341 #ifdef WM_EVENT_COUNTERS
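         /* Storage for "txsegN" counter names; sizeof("txsegXXX") leaves room for three digits. */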
   1342 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1343 #endif /* WM_EVENT_COUNTERS */
   1344 
   1345 
   1346 /*
    1347  * Register read/write functions,
    1348  * other than CSR_{READ|WRITE}().
   1349  */
   1350 
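         /*
          * Registers behind the I/O BAR are accessed indirectly: the
          * register offset is written at BAR offset 0 and the data is
          * then read or written at BAR offset 4, as the two helpers
          * below show.
          */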
   1351 #if 0 /* Not currently used */
   1352 static inline uint32_t
   1353 wm_io_read(struct wm_softc *sc, int reg)
   1354 {
   1355 
   1356 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1357 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1358 }
   1359 #endif
   1360 
   1361 static inline void
   1362 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1363 {
   1364 
   1365 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1366 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1367 }
   1368 
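         /*
          * Write an 8-bit value at offset "off" behind the serial control
          * register "reg": compose the data/address word, write it, then
          * poll for SCTL_CTL_READY for up to SCTL_CTL_POLL_TIMEOUT
          * iterations of 5us each.
          */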
   1369 static inline void
   1370 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1371     uint32_t data)
   1372 {
   1373 	uint32_t regval;
   1374 	int i;
   1375 
   1376 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1377 
   1378 	CSR_WRITE(sc, reg, regval);
   1379 
   1380 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1381 		delay(5);
   1382 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1383 			break;
   1384 	}
   1385 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1386 		aprint_error("%s: WARNING:"
   1387 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1388 		    device_xname(sc->sc_dev), reg);
   1389 	}
   1390 }
   1391 
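         /*
          * Store a bus address into a two-word little-endian descriptor
          * address field; on 32-bit platforms the high word is always 0.
          */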
   1392 static inline void
   1393 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1394 {
   1395 	wa->wa_low = htole32(v & 0xffffffffU);
   1396 	if (sizeof(bus_addr_t) == 8)
   1397 		wa->wa_high = htole32((uint64_t) v >> 32);
   1398 	else
   1399 		wa->wa_high = 0;
   1400 }
   1401 
   1402 /*
   1403  * Descriptor sync/init functions.
   1404  */
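         /*
          * wm_cdtxsync() syncs "num" descriptors starting at "start" and
          * splits the sync in two when the range wraps past the end of
          * the ring.  For example, on a 256-entry ring, start=250 num=10
          * syncs entries 250-255 and then 0-3.
          */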
   1405 static inline void
   1406 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1407 {
   1408 	struct wm_softc *sc = txq->txq_sc;
   1409 
   1410 	/* If it will wrap around, sync to the end of the ring. */
   1411 	if ((start + num) > WM_NTXDESC(txq)) {
   1412 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1413 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1414 		    (WM_NTXDESC(txq) - start), ops);
   1415 		num -= (WM_NTXDESC(txq) - start);
   1416 		start = 0;
   1417 	}
   1418 
   1419 	/* Now sync whatever is left. */
   1420 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1421 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1422 }
   1423 
   1424 static inline void
   1425 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1426 {
   1427 	struct wm_softc *sc = rxq->rxq_sc;
   1428 
   1429 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1430 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1431 }
   1432 
   1433 static inline void
   1434 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1435 {
   1436 	struct wm_softc *sc = rxq->rxq_sc;
   1437 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1438 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1439 	struct mbuf *m = rxs->rxs_mbuf;
   1440 
   1441 	/*
   1442 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1443 	 * so that the payload after the Ethernet header is aligned
    1444 	 * to a 4-byte boundary.
    1445 	 *
    1446 	 * XXX BRAINDAMAGE ALERT!
   1447 	 * The stupid chip uses the same size for every buffer, which
   1448 	 * is set in the Receive Control register.  We are using the 2K
   1449 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1450 	 * reason, we can't "scoot" packets longer than the standard
   1451 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1452 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1453 	 * the upper layer copy the headers.
   1454 	 */
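         	/*
         	 * Concretely: with the 2K buffer size and align_tweak = 2,
         	 * frames up to (2K - 2) bytes still fit; larger frames force
         	 * align_tweak to 0 on strict-alignment platforms.
         	 */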
   1455 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1456 
   1457 	wm_set_dma_addr(&rxd->wrx_addr,
   1458 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1459 	rxd->wrx_len = 0;
   1460 	rxd->wrx_cksum = 0;
   1461 	rxd->wrx_status = 0;
   1462 	rxd->wrx_errors = 0;
   1463 	rxd->wrx_special = 0;
   1464 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1465 
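         	/* Hand the descriptor back to the chip by advancing the ring tail (RDT). */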
   1466 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1467 }
   1468 
   1469 /*
   1470  * Device driver interface functions and commonly used functions.
   1471  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1472  */
   1473 
   1474 /* Lookup supported device table */
   1475 static const struct wm_product *
   1476 wm_lookup(const struct pci_attach_args *pa)
   1477 {
   1478 	const struct wm_product *wmp;
   1479 
   1480 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1481 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1482 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1483 			return wmp;
   1484 	}
   1485 	return NULL;
   1486 }
   1487 
   1488 /* The match function (ca_match) */
   1489 static int
   1490 wm_match(device_t parent, cfdata_t cf, void *aux)
   1491 {
   1492 	struct pci_attach_args *pa = aux;
   1493 
   1494 	if (wm_lookup(pa) != NULL)
   1495 		return 1;
   1496 
   1497 	return 0;
   1498 }
   1499 
   1500 /* The attach function (ca_attach) */
   1501 static void
   1502 wm_attach(device_t parent, device_t self, void *aux)
   1503 {
   1504 	struct wm_softc *sc = device_private(self);
   1505 	struct pci_attach_args *pa = aux;
   1506 	prop_dictionary_t dict;
   1507 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1508 	pci_chipset_tag_t pc = pa->pa_pc;
   1509 	int counts[PCI_INTR_TYPE_SIZE];
   1510 	pci_intr_type_t max_type;
   1511 	const char *eetype, *xname;
   1512 	bus_space_tag_t memt;
   1513 	bus_space_handle_t memh;
   1514 	bus_size_t memsize;
   1515 	int memh_valid;
   1516 	int i, error;
   1517 	const struct wm_product *wmp;
   1518 	prop_data_t ea;
   1519 	prop_number_t pn;
   1520 	uint8_t enaddr[ETHER_ADDR_LEN];
   1521 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1522 	pcireg_t preg, memtype;
   1523 	uint16_t eeprom_data, apme_mask;
   1524 	bool force_clear_smbi;
   1525 	uint32_t link_mode;
   1526 	uint32_t reg;
   1527 
   1528 	sc->sc_dev = self;
   1529 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1530 	sc->sc_stopping = false;
   1531 
   1532 	wmp = wm_lookup(pa);
   1533 #ifdef DIAGNOSTIC
   1534 	if (wmp == NULL) {
   1535 		printf("\n");
   1536 		panic("wm_attach: impossible");
   1537 	}
   1538 #endif
   1539 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1540 
   1541 	sc->sc_pc = pa->pa_pc;
   1542 	sc->sc_pcitag = pa->pa_tag;
   1543 
   1544 	if (pci_dma64_available(pa))
   1545 		sc->sc_dmat = pa->pa_dmat64;
   1546 	else
   1547 		sc->sc_dmat = pa->pa_dmat;
   1548 
   1549 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1550 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1551 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1552 
   1553 	sc->sc_type = wmp->wmp_type;
   1554 	if (sc->sc_type < WM_T_82543) {
   1555 		if (sc->sc_rev < 2) {
   1556 			aprint_error_dev(sc->sc_dev,
   1557 			    "i82542 must be at least rev. 2\n");
   1558 			return;
   1559 		}
   1560 		if (sc->sc_rev < 3)
   1561 			sc->sc_type = WM_T_82542_2_0;
   1562 	}
   1563 
   1564 	/*
   1565 	 * Disable MSI for Errata:
   1566 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1567 	 *
   1568 	 *  82544: Errata 25
   1569 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1570 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1571 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1572 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1573 	 *
   1574 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1575 	 *
   1576 	 *  82571 & 82572: Errata 63
   1577 	 */
   1578 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1579 	    || (sc->sc_type == WM_T_82572))
   1580 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1581 
   1582 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1583 	    || (sc->sc_type == WM_T_82580)
   1584 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1585 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1586 		sc->sc_flags |= WM_F_NEWQUEUE;
   1587 
   1588 	/* Set device properties (mactype) */
   1589 	dict = device_properties(sc->sc_dev);
   1590 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1591 
   1592 	/*
    1593 	 * Map the device.  All devices support memory-mapped access,
    1594 	 * which is required for normal operation.
   1595 	 */
   1596 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1597 	switch (memtype) {
   1598 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1599 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1600 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1601 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1602 		break;
   1603 	default:
   1604 		memh_valid = 0;
   1605 		break;
   1606 	}
   1607 
   1608 	if (memh_valid) {
   1609 		sc->sc_st = memt;
   1610 		sc->sc_sh = memh;
   1611 		sc->sc_ss = memsize;
   1612 	} else {
   1613 		aprint_error_dev(sc->sc_dev,
   1614 		    "unable to map device registers\n");
   1615 		return;
   1616 	}
   1617 
   1618 	/*
   1619 	 * In addition, i82544 and later support I/O mapped indirect
   1620 	 * register access.  It is not desirable (nor supported in
   1621 	 * this driver) to use it for normal operation, though it is
   1622 	 * required to work around bugs in some chip versions.
   1623 	 */
   1624 	if (sc->sc_type >= WM_T_82544) {
   1625 		/* First we have to find the I/O BAR. */
   1626 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1627 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1628 			if (memtype == PCI_MAPREG_TYPE_IO)
   1629 				break;
   1630 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1631 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1632 				i += 4;	/* skip high bits, too */
   1633 		}
   1634 		if (i < PCI_MAPREG_END) {
   1635 			/*
    1636 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1637 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1638 			 * That's not a problem, because newer chips don't
    1639 			 * have this bug.
    1640 			 *
    1641 			 * The i8254x apparently doesn't respond when the
    1642 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1643 			 * been configured.
   1644 			 */
   1645 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1646 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1647 				aprint_error_dev(sc->sc_dev,
   1648 				    "WARNING: I/O BAR at zero.\n");
   1649 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1650 					0, &sc->sc_iot, &sc->sc_ioh,
   1651 					NULL, &sc->sc_ios) == 0) {
   1652 				sc->sc_flags |= WM_F_IOH_VALID;
   1653 			} else {
   1654 				aprint_error_dev(sc->sc_dev,
   1655 				    "WARNING: unable to map I/O space\n");
   1656 			}
   1657 		}
   1658 
   1659 	}
   1660 
   1661 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1662 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1663 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1664 	if (sc->sc_type < WM_T_82542_2_1)
   1665 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1666 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1667 
   1668 	/* power up chip */
   1669 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1670 	    NULL)) && error != EOPNOTSUPP) {
   1671 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1672 		return;
   1673 	}
   1674 
   1675 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1676 
   1677 	/* Allocation settings */
   1678 	max_type = PCI_INTR_TYPE_MSIX;
   1679 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1680 	counts[PCI_INTR_TYPE_MSI] = 1;
   1681 	counts[PCI_INTR_TYPE_INTX] = 1;
   1682 
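         	/*
         	 * Try MSI-X first.  If allocation or setup fails, release the
         	 * vectors and fall back to MSI, then to INTx, re-running
         	 * pci_intr_alloc() with a lower max_type each time.
         	 */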
   1683 alloc_retry:
   1684 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1685 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1686 		return;
   1687 	}
   1688 
   1689 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1690 		error = wm_setup_msix(sc);
   1691 		if (error) {
   1692 			pci_intr_release(pc, sc->sc_intrs,
   1693 			    counts[PCI_INTR_TYPE_MSIX]);
   1694 
   1695 			/* Setup for MSI: Disable MSI-X */
   1696 			max_type = PCI_INTR_TYPE_MSI;
   1697 			counts[PCI_INTR_TYPE_MSI] = 1;
   1698 			counts[PCI_INTR_TYPE_INTX] = 1;
   1699 			goto alloc_retry;
   1700 		}
    1701 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1702 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1703 		error = wm_setup_legacy(sc);
   1704 		if (error) {
   1705 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1706 			    counts[PCI_INTR_TYPE_MSI]);
   1707 
   1708 			/* The next try is for INTx: Disable MSI */
   1709 			max_type = PCI_INTR_TYPE_INTX;
   1710 			counts[PCI_INTR_TYPE_INTX] = 1;
   1711 			goto alloc_retry;
   1712 		}
   1713 	} else {
   1714 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1715 		error = wm_setup_legacy(sc);
   1716 		if (error) {
   1717 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1718 			    counts[PCI_INTR_TYPE_INTX]);
   1719 			return;
   1720 		}
   1721 	}
   1722 
   1723 	/*
   1724 	 * Check the function ID (unit number of the chip).
   1725 	 */
   1726 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1727 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1728 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1729 	    || (sc->sc_type == WM_T_82580)
   1730 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1731 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1732 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1733 	else
   1734 		sc->sc_funcid = 0;
   1735 
   1736 	/*
   1737 	 * Determine a few things about the bus we're connected to.
   1738 	 */
   1739 	if (sc->sc_type < WM_T_82543) {
   1740 		/* We don't really know the bus characteristics here. */
   1741 		sc->sc_bus_speed = 33;
   1742 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1743 		/*
    1744 		 * CSA (Communication Streaming Architecture) is about as fast
    1745 		 * as a 32-bit 66MHz PCI bus.
   1746 		 */
   1747 		sc->sc_flags |= WM_F_CSA;
   1748 		sc->sc_bus_speed = 66;
   1749 		aprint_verbose_dev(sc->sc_dev,
   1750 		    "Communication Streaming Architecture\n");
   1751 		if (sc->sc_type == WM_T_82547) {
   1752 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1753 			callout_setfunc(&sc->sc_txfifo_ch,
   1754 					wm_82547_txfifo_stall, sc);
   1755 			aprint_verbose_dev(sc->sc_dev,
   1756 			    "using 82547 Tx FIFO stall work-around\n");
   1757 		}
   1758 	} else if (sc->sc_type >= WM_T_82571) {
   1759 		sc->sc_flags |= WM_F_PCIE;
   1760 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1761 		    && (sc->sc_type != WM_T_ICH10)
   1762 		    && (sc->sc_type != WM_T_PCH)
   1763 		    && (sc->sc_type != WM_T_PCH2)
   1764 		    && (sc->sc_type != WM_T_PCH_LPT)
   1765 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1766 			/* ICH* and PCH* have no PCIe capability registers */
   1767 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1768 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1769 				NULL) == 0)
   1770 				aprint_error_dev(sc->sc_dev,
   1771 				    "unable to find PCIe capability\n");
   1772 		}
   1773 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1774 	} else {
   1775 		reg = CSR_READ(sc, WMREG_STATUS);
   1776 		if (reg & STATUS_BUS64)
   1777 			sc->sc_flags |= WM_F_BUS64;
   1778 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1779 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1780 
   1781 			sc->sc_flags |= WM_F_PCIX;
   1782 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1783 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1784 				aprint_error_dev(sc->sc_dev,
   1785 				    "unable to find PCIX capability\n");
   1786 			else if (sc->sc_type != WM_T_82545_3 &&
   1787 				 sc->sc_type != WM_T_82546_3) {
   1788 				/*
   1789 				 * Work around a problem caused by the BIOS
   1790 				 * setting the max memory read byte count
   1791 				 * incorrectly.
   1792 				 */
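         				/*
         				 * Per the PCI-X spec, MMRBC is a
         				 * power-of-two encoding: 0 -> 512,
         				 * 1 -> 1024, 2 -> 2048, 3 -> 4096
         				 * bytes, hence "512 << n" below.
         				 */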
   1793 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1794 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1795 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1796 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1797 
   1798 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1799 				    PCIX_CMD_BYTECNT_SHIFT;
   1800 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1801 				    PCIX_STATUS_MAXB_SHIFT;
   1802 				if (bytecnt > maxb) {
   1803 					aprint_verbose_dev(sc->sc_dev,
   1804 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1805 					    512 << bytecnt, 512 << maxb);
   1806 					pcix_cmd = (pcix_cmd &
   1807 					    ~PCIX_CMD_BYTECNT_MASK) |
   1808 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1809 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1810 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1811 					    pcix_cmd);
   1812 				}
   1813 			}
   1814 		}
   1815 		/*
   1816 		 * The quad port adapter is special; it has a PCIX-PCIX
   1817 		 * bridge on the board, and can run the secondary bus at
   1818 		 * a higher speed.
   1819 		 */
   1820 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1821 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1822 								      : 66;
   1823 		} else if (sc->sc_flags & WM_F_PCIX) {
   1824 			switch (reg & STATUS_PCIXSPD_MASK) {
   1825 			case STATUS_PCIXSPD_50_66:
   1826 				sc->sc_bus_speed = 66;
   1827 				break;
   1828 			case STATUS_PCIXSPD_66_100:
   1829 				sc->sc_bus_speed = 100;
   1830 				break;
   1831 			case STATUS_PCIXSPD_100_133:
   1832 				sc->sc_bus_speed = 133;
   1833 				break;
   1834 			default:
   1835 				aprint_error_dev(sc->sc_dev,
   1836 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1837 				    reg & STATUS_PCIXSPD_MASK);
   1838 				sc->sc_bus_speed = 66;
   1839 				break;
   1840 			}
   1841 		} else
   1842 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1843 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1844 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1845 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1846 	}
   1847 
   1848 	/* clear interesting stat counters */
   1849 	CSR_READ(sc, WMREG_COLC);
   1850 	CSR_READ(sc, WMREG_RXERRC);
   1851 
   1852 	/* get PHY control from SMBus to PCIe */
   1853 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1854 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1855 		wm_smbustopci(sc);
   1856 
   1857 	/* Reset the chip to a known state. */
   1858 	wm_reset(sc);
   1859 
   1860 	/* Get some information about the EEPROM. */
   1861 	switch (sc->sc_type) {
   1862 	case WM_T_82542_2_0:
   1863 	case WM_T_82542_2_1:
   1864 	case WM_T_82543:
   1865 	case WM_T_82544:
   1866 		/* Microwire */
   1867 		sc->sc_nvm_wordsize = 64;
   1868 		sc->sc_nvm_addrbits = 6;
   1869 		break;
   1870 	case WM_T_82540:
   1871 	case WM_T_82545:
   1872 	case WM_T_82545_3:
   1873 	case WM_T_82546:
   1874 	case WM_T_82546_3:
   1875 		/* Microwire */
   1876 		reg = CSR_READ(sc, WMREG_EECD);
   1877 		if (reg & EECD_EE_SIZE) {
   1878 			sc->sc_nvm_wordsize = 256;
   1879 			sc->sc_nvm_addrbits = 8;
   1880 		} else {
   1881 			sc->sc_nvm_wordsize = 64;
   1882 			sc->sc_nvm_addrbits = 6;
   1883 		}
   1884 		sc->sc_flags |= WM_F_LOCK_EECD;
   1885 		break;
   1886 	case WM_T_82541:
   1887 	case WM_T_82541_2:
   1888 	case WM_T_82547:
   1889 	case WM_T_82547_2:
   1890 		sc->sc_flags |= WM_F_LOCK_EECD;
   1891 		reg = CSR_READ(sc, WMREG_EECD);
   1892 		if (reg & EECD_EE_TYPE) {
   1893 			/* SPI */
   1894 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1895 			wm_nvm_set_addrbits_size_eecd(sc);
   1896 		} else {
   1897 			/* Microwire */
   1898 			if ((reg & EECD_EE_ABITS) != 0) {
   1899 				sc->sc_nvm_wordsize = 256;
   1900 				sc->sc_nvm_addrbits = 8;
   1901 			} else {
   1902 				sc->sc_nvm_wordsize = 64;
   1903 				sc->sc_nvm_addrbits = 6;
   1904 			}
   1905 		}
   1906 		break;
   1907 	case WM_T_82571:
   1908 	case WM_T_82572:
   1909 		/* SPI */
   1910 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1911 		wm_nvm_set_addrbits_size_eecd(sc);
   1912 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1913 		break;
   1914 	case WM_T_82573:
   1915 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1916 		/* FALLTHROUGH */
   1917 	case WM_T_82574:
   1918 	case WM_T_82583:
   1919 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1920 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1921 			sc->sc_nvm_wordsize = 2048;
   1922 		} else {
   1923 			/* SPI */
   1924 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1925 			wm_nvm_set_addrbits_size_eecd(sc);
   1926 		}
   1927 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1928 		break;
   1929 	case WM_T_82575:
   1930 	case WM_T_82576:
   1931 	case WM_T_82580:
   1932 	case WM_T_I350:
   1933 	case WM_T_I354:
   1934 	case WM_T_80003:
   1935 		/* SPI */
   1936 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1937 		wm_nvm_set_addrbits_size_eecd(sc);
   1938 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1939 		    | WM_F_LOCK_SWSM;
   1940 		break;
   1941 	case WM_T_ICH8:
   1942 	case WM_T_ICH9:
   1943 	case WM_T_ICH10:
   1944 	case WM_T_PCH:
   1945 	case WM_T_PCH2:
   1946 	case WM_T_PCH_LPT:
   1947 		/* FLASH */
   1948 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1949 		sc->sc_nvm_wordsize = 2048;
   1950 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   1951 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1952 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1953 			aprint_error_dev(sc->sc_dev,
   1954 			    "can't map FLASH registers\n");
   1955 			goto out;
   1956 		}
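         		/*
         		 * GFPREG holds the flash base and limit in units of
         		 * ICH_FLASH_SECTOR_SIZE sectors; the bank size computed
         		 * below is (limit - base + 1) sectors, split across two
         		 * banks and converted to 16-bit words.
         		 */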
   1957 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1958 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1959 		    ICH_FLASH_SECTOR_SIZE;
   1960 		sc->sc_ich8_flash_bank_size =
   1961 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1962 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   1963 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1964 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1965 		sc->sc_flashreg_offset = 0;
   1966 		break;
   1967 	case WM_T_PCH_SPT:
   1968 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   1969 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1970 		sc->sc_flasht = sc->sc_st;
   1971 		sc->sc_flashh = sc->sc_sh;
   1972 		sc->sc_ich8_flash_base = 0;
   1973 		sc->sc_nvm_wordsize =
   1974 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   1975 			* NVM_SIZE_MULTIPLIER;
    1976 		/* The size is in bytes; we want words */
   1977 		sc->sc_nvm_wordsize /= 2;
   1978 		/* assume 2 banks */
   1979 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   1980 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   1981 		break;
   1982 	case WM_T_I210:
   1983 	case WM_T_I211:
   1984 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1985 			wm_nvm_set_addrbits_size_eecd(sc);
   1986 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1987 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1988 		} else {
   1989 			sc->sc_nvm_wordsize = INVM_SIZE;
   1990 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1991 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1992 		}
   1993 		break;
   1994 	default:
   1995 		break;
   1996 	}
   1997 
    1998 	/* Ensure the SMBI bit is clear before the first NVM or PHY access */
   1999 	switch (sc->sc_type) {
   2000 	case WM_T_82571:
   2001 	case WM_T_82572:
   2002 		reg = CSR_READ(sc, WMREG_SWSM2);
   2003 		if ((reg & SWSM2_LOCK) == 0) {
   2004 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2005 			force_clear_smbi = true;
   2006 		} else
   2007 			force_clear_smbi = false;
   2008 		break;
   2009 	case WM_T_82573:
   2010 	case WM_T_82574:
   2011 	case WM_T_82583:
   2012 		force_clear_smbi = true;
   2013 		break;
   2014 	default:
   2015 		force_clear_smbi = false;
   2016 		break;
   2017 	}
   2018 	if (force_clear_smbi) {
   2019 		reg = CSR_READ(sc, WMREG_SWSM);
   2020 		if ((reg & SWSM_SMBI) != 0)
   2021 			aprint_error_dev(sc->sc_dev,
   2022 			    "Please update the Bootagent\n");
   2023 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2024 	}
   2025 
   2026 	/*
    2027 	 * Defer printing the EEPROM type until after verifying the checksum.
   2028 	 * This allows the EEPROM type to be printed correctly in the case
   2029 	 * that no EEPROM is attached.
   2030 	 */
   2031 	/*
   2032 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2033 	 * this for later, so we can fail future reads from the EEPROM.
   2034 	 */
   2035 	if (wm_nvm_validate_checksum(sc)) {
   2036 		/*
   2037 		 * Read twice again because some PCI-e parts fail the
   2038 		 * first check due to the link being in sleep state.
   2039 		 */
   2040 		if (wm_nvm_validate_checksum(sc))
   2041 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2042 	}
   2043 
   2044 	/* Set device properties (macflags) */
   2045 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2046 
   2047 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2048 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2049 	else {
   2050 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2051 		    sc->sc_nvm_wordsize);
   2052 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2053 			aprint_verbose("iNVM");
   2054 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2055 			aprint_verbose("FLASH(HW)");
   2056 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2057 			aprint_verbose("FLASH");
   2058 		else {
   2059 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2060 				eetype = "SPI";
   2061 			else
   2062 				eetype = "MicroWire";
   2063 			aprint_verbose("(%d address bits) %s EEPROM",
   2064 			    sc->sc_nvm_addrbits, eetype);
   2065 		}
   2066 	}
   2067 	wm_nvm_version(sc);
   2068 	aprint_verbose("\n");
   2069 
   2070 	/* Check for I21[01] PLL workaround */
   2071 	if (sc->sc_type == WM_T_I210)
   2072 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2073 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2074 		/* NVM image release 3.25 has a workaround */
   2075 		if ((sc->sc_nvm_ver_major < 3)
   2076 		    || ((sc->sc_nvm_ver_major == 3)
   2077 			&& (sc->sc_nvm_ver_minor < 25))) {
   2078 			aprint_verbose_dev(sc->sc_dev,
   2079 			    "ROM image version %d.%d is older than 3.25\n",
   2080 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2081 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2082 		}
   2083 	}
   2084 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2085 		wm_pll_workaround_i210(sc);
   2086 
   2087 	wm_get_wakeup(sc);
   2088 	switch (sc->sc_type) {
   2089 	case WM_T_82571:
   2090 	case WM_T_82572:
   2091 	case WM_T_82573:
   2092 	case WM_T_82574:
   2093 	case WM_T_82583:
   2094 	case WM_T_80003:
   2095 	case WM_T_ICH8:
   2096 	case WM_T_ICH9:
   2097 	case WM_T_ICH10:
   2098 	case WM_T_PCH:
   2099 	case WM_T_PCH2:
   2100 	case WM_T_PCH_LPT:
   2101 	case WM_T_PCH_SPT:
   2102 		/* Non-AMT based hardware can now take control from firmware */
   2103 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2104 			wm_get_hw_control(sc);
   2105 		break;
   2106 	default:
   2107 		break;
   2108 	}
   2109 
   2110 	/*
   2111 	 * Read the Ethernet address from the EEPROM, if not first found
   2112 	 * in device properties.
   2113 	 */
   2114 	ea = prop_dictionary_get(dict, "mac-address");
   2115 	if (ea != NULL) {
   2116 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2117 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2118 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2119 	} else {
   2120 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2121 			aprint_error_dev(sc->sc_dev,
   2122 			    "unable to read Ethernet address\n");
   2123 			goto out;
   2124 		}
   2125 	}
   2126 
   2127 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2128 	    ether_sprintf(enaddr));
   2129 
   2130 	/*
   2131 	 * Read the config info from the EEPROM, and set up various
   2132 	 * bits in the control registers based on their contents.
   2133 	 */
   2134 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2135 	if (pn != NULL) {
   2136 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2137 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2138 	} else {
   2139 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2140 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2141 			goto out;
   2142 		}
   2143 	}
   2144 
   2145 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2146 	if (pn != NULL) {
   2147 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2148 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2149 	} else {
   2150 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2151 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2152 			goto out;
   2153 		}
   2154 	}
   2155 
   2156 	/* check for WM_F_WOL */
   2157 	switch (sc->sc_type) {
   2158 	case WM_T_82542_2_0:
   2159 	case WM_T_82542_2_1:
   2160 	case WM_T_82543:
   2161 		/* dummy? */
   2162 		eeprom_data = 0;
   2163 		apme_mask = NVM_CFG3_APME;
   2164 		break;
   2165 	case WM_T_82544:
   2166 		apme_mask = NVM_CFG2_82544_APM_EN;
   2167 		eeprom_data = cfg2;
   2168 		break;
   2169 	case WM_T_82546:
   2170 	case WM_T_82546_3:
   2171 	case WM_T_82571:
   2172 	case WM_T_82572:
   2173 	case WM_T_82573:
   2174 	case WM_T_82574:
   2175 	case WM_T_82583:
   2176 	case WM_T_80003:
   2177 	default:
   2178 		apme_mask = NVM_CFG3_APME;
   2179 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2180 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2181 		break;
   2182 	case WM_T_82575:
   2183 	case WM_T_82576:
   2184 	case WM_T_82580:
   2185 	case WM_T_I350:
   2186 	case WM_T_I354: /* XXX ok? */
   2187 	case WM_T_ICH8:
   2188 	case WM_T_ICH9:
   2189 	case WM_T_ICH10:
   2190 	case WM_T_PCH:
   2191 	case WM_T_PCH2:
   2192 	case WM_T_PCH_LPT:
   2193 	case WM_T_PCH_SPT:
   2194 		/* XXX The funcid should be checked on some devices */
   2195 		apme_mask = WUC_APME;
   2196 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2197 		break;
   2198 	}
   2199 
   2200 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2201 	if ((eeprom_data & apme_mask) != 0)
   2202 		sc->sc_flags |= WM_F_WOL;
   2203 #ifdef WM_DEBUG
   2204 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2205 		printf("WOL\n");
   2206 #endif
   2207 
   2208 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2209 		/* Check NVM for autonegotiation */
   2210 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2211 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2212 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2213 		}
   2214 	}
   2215 
   2216 	/*
    2217 	 * XXX need special handling for some multi-port cards
    2218 	 * to disable a particular port.
   2219 	 */
   2220 
   2221 	if (sc->sc_type >= WM_T_82544) {
   2222 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2223 		if (pn != NULL) {
   2224 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2225 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2226 		} else {
   2227 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2228 				aprint_error_dev(sc->sc_dev,
   2229 				    "unable to read SWDPIN\n");
   2230 				goto out;
   2231 			}
   2232 		}
   2233 	}
   2234 
   2235 	if (cfg1 & NVM_CFG1_ILOS)
   2236 		sc->sc_ctrl |= CTRL_ILOS;
   2237 
   2238 	/*
   2239 	 * XXX
    2240 	 * This code isn't correct because pins 2 and 3 are located
    2241 	 * in different positions on newer chips. Check all the datasheets.
    2242 	 *
    2243 	 * Until this is resolved, only do this on chips <= 82580.
   2244 	 */
   2245 	if (sc->sc_type <= WM_T_82580) {
   2246 		if (sc->sc_type >= WM_T_82544) {
   2247 			sc->sc_ctrl |=
   2248 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2249 			    CTRL_SWDPIO_SHIFT;
   2250 			sc->sc_ctrl |=
   2251 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2252 			    CTRL_SWDPINS_SHIFT;
   2253 		} else {
   2254 			sc->sc_ctrl |=
   2255 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2256 			    CTRL_SWDPIO_SHIFT;
   2257 		}
   2258 	}
   2259 
   2260 	/* XXX For other than 82580? */
   2261 	if (sc->sc_type == WM_T_82580) {
   2262 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2263 		if (nvmword & __BIT(13))
   2264 			sc->sc_ctrl |= CTRL_ILOS;
   2265 	}
   2266 
   2267 #if 0
   2268 	if (sc->sc_type >= WM_T_82544) {
   2269 		if (cfg1 & NVM_CFG1_IPS0)
   2270 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2271 		if (cfg1 & NVM_CFG1_IPS1)
   2272 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2273 		sc->sc_ctrl_ext |=
   2274 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2275 		    CTRL_EXT_SWDPIO_SHIFT;
   2276 		sc->sc_ctrl_ext |=
   2277 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2278 		    CTRL_EXT_SWDPINS_SHIFT;
   2279 	} else {
   2280 		sc->sc_ctrl_ext |=
   2281 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2282 		    CTRL_EXT_SWDPIO_SHIFT;
   2283 	}
   2284 #endif
   2285 
   2286 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2287 #if 0
   2288 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2289 #endif
   2290 
   2291 	if (sc->sc_type == WM_T_PCH) {
   2292 		uint16_t val;
   2293 
   2294 		/* Save the NVM K1 bit setting */
   2295 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2296 
   2297 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2298 			sc->sc_nvm_k1_enabled = 1;
   2299 		else
   2300 			sc->sc_nvm_k1_enabled = 0;
   2301 	}
   2302 
   2303 	/*
    2304 	 * Determine whether we're in TBI, GMII or SGMII mode, and
    2305 	 * initialize the media structures accordingly.
   2306 	 */
   2307 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2308 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2309 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2310 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2311 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2312 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2313 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2314 	} else if (sc->sc_type < WM_T_82543 ||
   2315 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2316 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2317 			aprint_error_dev(sc->sc_dev,
   2318 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2319 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2320 		}
   2321 		wm_tbi_mediainit(sc);
   2322 	} else {
   2323 		switch (sc->sc_type) {
   2324 		case WM_T_82575:
   2325 		case WM_T_82576:
   2326 		case WM_T_82580:
   2327 		case WM_T_I350:
   2328 		case WM_T_I354:
   2329 		case WM_T_I210:
   2330 		case WM_T_I211:
   2331 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2332 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2333 			switch (link_mode) {
   2334 			case CTRL_EXT_LINK_MODE_1000KX:
   2335 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2336 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2337 				break;
   2338 			case CTRL_EXT_LINK_MODE_SGMII:
   2339 				if (wm_sgmii_uses_mdio(sc)) {
   2340 					aprint_verbose_dev(sc->sc_dev,
   2341 					    "SGMII(MDIO)\n");
   2342 					sc->sc_flags |= WM_F_SGMII;
   2343 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2344 					break;
   2345 				}
   2346 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2347 				/*FALLTHROUGH*/
   2348 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2349 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2350 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2351 					if (link_mode
   2352 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2353 						sc->sc_mediatype
   2354 						    = WM_MEDIATYPE_COPPER;
   2355 						sc->sc_flags |= WM_F_SGMII;
   2356 					} else {
   2357 						sc->sc_mediatype
   2358 						    = WM_MEDIATYPE_SERDES;
   2359 						aprint_verbose_dev(sc->sc_dev,
   2360 						    "SERDES\n");
   2361 					}
   2362 					break;
   2363 				}
   2364 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2365 					aprint_verbose_dev(sc->sc_dev,
   2366 					    "SERDES\n");
   2367 
   2368 				/* Change current link mode setting */
   2369 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2370 				switch (sc->sc_mediatype) {
   2371 				case WM_MEDIATYPE_COPPER:
   2372 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2373 					break;
   2374 				case WM_MEDIATYPE_SERDES:
   2375 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2376 					break;
   2377 				default:
   2378 					break;
   2379 				}
   2380 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2381 				break;
   2382 			case CTRL_EXT_LINK_MODE_GMII:
   2383 			default:
   2384 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2385 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2386 				break;
   2387 			}
   2388 
    2390 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2391 				reg |= CTRL_EXT_I2C_ENA;
    2392 			else
    2393 				reg &= ~CTRL_EXT_I2C_ENA;
   2394 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2395 
   2396 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2397 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2398 			else
   2399 				wm_tbi_mediainit(sc);
   2400 			break;
   2401 		default:
   2402 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2403 				aprint_error_dev(sc->sc_dev,
   2404 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2405 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2406 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2407 		}
   2408 	}
   2409 
   2410 	ifp = &sc->sc_ethercom.ec_if;
   2411 	xname = device_xname(sc->sc_dev);
   2412 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2413 	ifp->if_softc = sc;
   2414 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2415 	ifp->if_ioctl = wm_ioctl;
   2416 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2417 		ifp->if_start = wm_nq_start;
   2418 		if (sc->sc_nqueues > 1)
   2419 			ifp->if_transmit = wm_nq_transmit;
   2420 	} else
   2421 		ifp->if_start = wm_start;
   2422 	ifp->if_watchdog = wm_watchdog;
   2423 	ifp->if_init = wm_init;
   2424 	ifp->if_stop = wm_stop;
   2425 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2426 	IFQ_SET_READY(&ifp->if_snd);
   2427 
   2428 	/* Check for jumbo frame */
   2429 	switch (sc->sc_type) {
   2430 	case WM_T_82573:
   2431 		/* XXX limited to 9234 if ASPM is disabled */
   2432 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2433 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2434 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2435 		break;
   2436 	case WM_T_82571:
   2437 	case WM_T_82572:
   2438 	case WM_T_82574:
   2439 	case WM_T_82575:
   2440 	case WM_T_82576:
   2441 	case WM_T_82580:
   2442 	case WM_T_I350:
    2443 	case WM_T_I354: /* XXX ok? */
   2444 	case WM_T_I210:
   2445 	case WM_T_I211:
   2446 	case WM_T_80003:
   2447 	case WM_T_ICH9:
   2448 	case WM_T_ICH10:
   2449 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2450 	case WM_T_PCH_LPT:
   2451 	case WM_T_PCH_SPT:
   2452 		/* XXX limited to 9234 */
   2453 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2454 		break;
   2455 	case WM_T_PCH:
   2456 		/* XXX limited to 4096 */
   2457 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2458 		break;
   2459 	case WM_T_82542_2_0:
   2460 	case WM_T_82542_2_1:
   2461 	case WM_T_82583:
   2462 	case WM_T_ICH8:
   2463 		/* No support for jumbo frame */
   2464 		break;
   2465 	default:
   2466 		/* ETHER_MAX_LEN_JUMBO */
   2467 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2468 		break;
   2469 	}
   2470 
    2471 	/* If we're an i82543 or greater, we can support VLANs. */
   2472 	if (sc->sc_type >= WM_T_82543)
   2473 		sc->sc_ethercom.ec_capabilities |=
   2474 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2475 
   2476 	/*
    2477 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2478 	 * on i82543 and later.
   2479 	 */
   2480 	if (sc->sc_type >= WM_T_82543) {
   2481 		ifp->if_capabilities |=
   2482 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2483 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2484 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2485 		    IFCAP_CSUM_TCPv6_Tx |
   2486 		    IFCAP_CSUM_UDPv6_Tx;
   2487 	}
   2488 
   2489 	/*
   2490 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2491 	 *
   2492 	 *	82541GI (8086:1076) ... no
   2493 	 *	82572EI (8086:10b9) ... yes
   2494 	 */
   2495 	if (sc->sc_type >= WM_T_82571) {
   2496 		ifp->if_capabilities |=
   2497 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2498 	}
   2499 
   2500 	/*
    2501 	 * If we're an i82544 or greater (except i82547), we can do
   2502 	 * TCP segmentation offload.
   2503 	 */
   2504 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2505 		ifp->if_capabilities |= IFCAP_TSOv4;
   2506 	}
   2507 
   2508 	if (sc->sc_type >= WM_T_82571) {
   2509 		ifp->if_capabilities |= IFCAP_TSOv6;
   2510 	}
   2511 
   2512 #ifdef WM_MPSAFE
   2513 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2514 #else
   2515 	sc->sc_core_lock = NULL;
   2516 #endif
   2517 
   2518 	/* Attach the interface. */
   2519 	if_initialize(ifp);
   2520 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2521 	ether_ifattach(ifp, enaddr);
   2522 	if_register(ifp);
   2523 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2524 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2525 			  RND_FLAG_DEFAULT);
   2526 
   2527 #ifdef WM_EVENT_COUNTERS
   2528 	/* Attach event counters. */
   2529 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2530 	    NULL, xname, "txsstall");
   2531 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2532 	    NULL, xname, "txdstall");
   2533 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2534 	    NULL, xname, "txfifo_stall");
   2535 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2536 	    NULL, xname, "txdw");
   2537 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2538 	    NULL, xname, "txqe");
   2539 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2540 	    NULL, xname, "rxintr");
   2541 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2542 	    NULL, xname, "linkintr");
   2543 
   2544 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2545 	    NULL, xname, "rxipsum");
   2546 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2547 	    NULL, xname, "rxtusum");
   2548 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2549 	    NULL, xname, "txipsum");
   2550 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2551 	    NULL, xname, "txtusum");
   2552 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2553 	    NULL, xname, "txtusum6");
   2554 
   2555 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2556 	    NULL, xname, "txtso");
   2557 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2558 	    NULL, xname, "txtso6");
   2559 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2560 	    NULL, xname, "txtsopain");
   2561 
   2562 	for (i = 0; i < WM_NTXSEGS; i++) {
   2563 		snprintf(wm_txseg_evcnt_names[i],
   2564 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2565 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2566 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2567 	}
   2568 
   2569 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2570 	    NULL, xname, "txdrop");
   2571 
   2572 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2573 	    NULL, xname, "tu");
   2574 
   2575 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2576 	    NULL, xname, "tx_xoff");
   2577 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2578 	    NULL, xname, "tx_xon");
   2579 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2580 	    NULL, xname, "rx_xoff");
   2581 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2582 	    NULL, xname, "rx_xon");
   2583 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2584 	    NULL, xname, "rx_macctl");
   2585 #endif /* WM_EVENT_COUNTERS */
   2586 
   2587 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2588 		pmf_class_network_register(self, ifp);
   2589 	else
   2590 		aprint_error_dev(self, "couldn't establish power handler\n");
   2591 
   2592 	sc->sc_flags |= WM_F_ATTACHED;
   2593  out:
   2594 	return;
   2595 }
   2596 
   2597 /* The detach function (ca_detach) */
   2598 static int
   2599 wm_detach(device_t self, int flags __unused)
   2600 {
   2601 	struct wm_softc *sc = device_private(self);
   2602 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2603 	int i;
   2604 #ifndef WM_MPSAFE
   2605 	int s;
   2606 #endif
   2607 
   2608 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2609 		return 0;
   2610 
   2611 #ifndef WM_MPSAFE
   2612 	s = splnet();
   2613 #endif
    2614 	/* Stop the interface; callouts are stopped inside wm_stop(). */
   2615 	wm_stop(ifp, 1);
   2616 
   2617 #ifndef WM_MPSAFE
   2618 	splx(s);
   2619 #endif
   2620 
   2621 	pmf_device_deregister(self);
   2622 
   2623 	/* Tell the firmware about the release */
   2624 	WM_CORE_LOCK(sc);
   2625 	wm_release_manageability(sc);
   2626 	wm_release_hw_control(sc);
   2627 	WM_CORE_UNLOCK(sc);
   2628 
   2629 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2630 
   2631 	/* Delete all remaining media. */
   2632 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2633 
   2634 	ether_ifdetach(ifp);
   2635 	if_detach(ifp);
   2636 	if_percpuq_destroy(sc->sc_ipq);
   2637 
   2638 	/* Unload RX dmamaps and free mbufs */
   2639 	for (i = 0; i < sc->sc_nqueues; i++) {
   2640 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2641 		WM_RX_LOCK(rxq);
   2642 		wm_rxdrain(rxq);
   2643 		WM_RX_UNLOCK(rxq);
   2644 	}
   2645 	/* Must unlock here */
   2646 
   2647 	/* Disestablish the interrupt handler */
   2648 	for (i = 0; i < sc->sc_nintrs; i++) {
   2649 		if (sc->sc_ihs[i] != NULL) {
   2650 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2651 			sc->sc_ihs[i] = NULL;
   2652 		}
   2653 	}
   2654 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2655 
   2656 	wm_free_txrx_queues(sc);
   2657 
   2658 	/* Unmap the registers */
   2659 	if (sc->sc_ss) {
   2660 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2661 		sc->sc_ss = 0;
   2662 	}
   2663 	if (sc->sc_ios) {
   2664 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2665 		sc->sc_ios = 0;
   2666 	}
   2667 	if (sc->sc_flashs) {
   2668 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2669 		sc->sc_flashs = 0;
   2670 	}
   2671 
   2672 	if (sc->sc_core_lock)
   2673 		mutex_obj_free(sc->sc_core_lock);
   2674 
   2675 	return 0;
   2676 }
   2677 
   2678 static bool
   2679 wm_suspend(device_t self, const pmf_qual_t *qual)
   2680 {
   2681 	struct wm_softc *sc = device_private(self);
   2682 
   2683 	wm_release_manageability(sc);
   2684 	wm_release_hw_control(sc);
   2685 #ifdef WM_WOL
   2686 	wm_enable_wakeup(sc);
   2687 #endif
   2688 
   2689 	return true;
   2690 }
   2691 
   2692 static bool
   2693 wm_resume(device_t self, const pmf_qual_t *qual)
   2694 {
   2695 	struct wm_softc *sc = device_private(self);
   2696 
   2697 	wm_init_manageability(sc);
   2698 
   2699 	return true;
   2700 }
   2701 
   2702 /*
   2703  * wm_watchdog:		[ifnet interface function]
   2704  *
   2705  *	Watchdog timer handler.
   2706  */
   2707 static void
   2708 wm_watchdog(struct ifnet *ifp)
   2709 {
   2710 	int qid;
   2711 	struct wm_softc *sc = ifp->if_softc;
   2712 
   2713 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2714 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2715 
   2716 		wm_watchdog_txq(ifp, txq);
   2717 	}
   2718 
   2719 	/* Reset the interface. */
   2720 	(void) wm_init(ifp);
   2721 
   2722 	/*
    2723 	 * Some upper-layer processing, e.g. ALTQ, still calls
    2724 	 * ifp->if_start() directly.
   2725 	 */
   2726 	/* Try to get more packets going. */
   2727 	ifp->if_start(ifp);
   2728 }
   2729 
   2730 static void
   2731 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2732 {
   2733 	struct wm_softc *sc = ifp->if_softc;
   2734 
   2735 	/*
   2736 	 * Since we're using delayed interrupts, sweep up
   2737 	 * before we report an error.
   2738 	 */
   2739 	WM_TX_LOCK(txq);
   2740 	wm_txeof(sc, txq);
   2741 	WM_TX_UNLOCK(txq);
   2742 
   2743 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2744 #ifdef WM_DEBUG
   2745 		int i, j;
   2746 		struct wm_txsoft *txs;
   2747 #endif
   2748 		log(LOG_ERR,
   2749 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2750 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2751 		    txq->txq_next);
   2752 		ifp->if_oerrors++;
   2753 #ifdef WM_DEBUG
    2754 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2755 		    i = WM_NEXTTXS(txq, i)) {
    2756 			txs = &txq->txq_soft[i];
    2757 			printf("txs %d tx %d -> %d\n",
    2758 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2759 			for (j = txs->txs_firstdesc; ;
    2760 			    j = WM_NEXTTX(txq, j)) {
    2761 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2762 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2763 				printf("\t %#08x%08x\n",
    2764 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2765 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2766 				if (j == txs->txs_lastdesc)
    2767 					break;
    2768 			}
    2769 		}
   2770 #endif
   2771 	}
   2772 }
   2773 
   2774 /*
   2775  * wm_tick:
   2776  *
   2777  *	One second timer, used to check link status, sweep up
   2778  *	completed transmit jobs, etc.
   2779  */
   2780 static void
   2781 wm_tick(void *arg)
   2782 {
   2783 	struct wm_softc *sc = arg;
   2784 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2785 #ifndef WM_MPSAFE
   2786 	int s;
   2787 
   2788 	s = splnet();
   2789 #endif
   2790 
   2791 	WM_CORE_LOCK(sc);
   2792 
   2793 	if (sc->sc_stopping)
   2794 		goto out;
   2795 
   2796 	if (sc->sc_type >= WM_T_82542_2_1) {
   2797 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2798 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2799 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2800 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2801 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2802 	}
   2803 
   2804 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2805 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2806 	    + CSR_READ(sc, WMREG_CRCERRS)
   2807 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2808 	    + CSR_READ(sc, WMREG_SYMERRC)
   2809 	    + CSR_READ(sc, WMREG_RXERRC)
   2810 	    + CSR_READ(sc, WMREG_SEC)
   2811 	    + CSR_READ(sc, WMREG_CEXTERR)
   2812 	    + CSR_READ(sc, WMREG_RLEC);
   2813 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2814 
   2815 	if (sc->sc_flags & WM_F_HAS_MII)
   2816 		mii_tick(&sc->sc_mii);
   2817 	else if ((sc->sc_type >= WM_T_82575)
   2818 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2819 		wm_serdes_tick(sc);
   2820 	else
   2821 		wm_tbi_tick(sc);
   2822 
   2823 out:
   2824 	WM_CORE_UNLOCK(sc);
   2825 #ifndef WM_MPSAFE
   2826 	splx(s);
   2827 #endif
   2828 
   2829 	if (!sc->sc_stopping)
   2830 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2831 }
   2832 
   2833 static int
   2834 wm_ifflags_cb(struct ethercom *ec)
   2835 {
   2836 	struct ifnet *ifp = &ec->ec_if;
   2837 	struct wm_softc *sc = ifp->if_softc;
   2838 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2839 	int rc = 0;
   2840 
   2841 	WM_CORE_LOCK(sc);
   2842 
   2843 	if (change != 0)
   2844 		sc->sc_if_flags = ifp->if_flags;
   2845 
   2846 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2847 		rc = ENETRESET;
   2848 		goto out;
   2849 	}
   2850 
   2851 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2852 		wm_set_filter(sc);
   2853 
   2854 	wm_set_vlan(sc);
   2855 
   2856 out:
   2857 	WM_CORE_UNLOCK(sc);
   2858 
   2859 	return rc;
   2860 }
   2861 
   2862 /*
   2863  * wm_ioctl:		[ifnet interface function]
   2864  *
   2865  *	Handle control requests from the operator.
   2866  */
   2867 static int
   2868 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2869 {
   2870 	struct wm_softc *sc = ifp->if_softc;
   2871 	struct ifreq *ifr = (struct ifreq *) data;
   2872 	struct ifaddr *ifa = (struct ifaddr *)data;
   2873 	struct sockaddr_dl *sdl;
   2874 	int s, error;
   2875 
   2876 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2877 		device_xname(sc->sc_dev), __func__));
   2878 #ifndef WM_MPSAFE
   2879 	s = splnet();
   2880 #endif
   2881 	switch (cmd) {
   2882 	case SIOCSIFMEDIA:
   2883 	case SIOCGIFMEDIA:
   2884 		WM_CORE_LOCK(sc);
   2885 		/* Flow control requires full-duplex mode. */
   2886 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2887 		    (ifr->ifr_media & IFM_FDX) == 0)
   2888 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2889 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2890 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2891 				/* We can do both TXPAUSE and RXPAUSE. */
   2892 				ifr->ifr_media |=
   2893 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2894 			}
   2895 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2896 		}
   2897 		WM_CORE_UNLOCK(sc);
   2898 #ifdef WM_MPSAFE
   2899 		s = splnet();
   2900 #endif
   2901 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2902 #ifdef WM_MPSAFE
   2903 		splx(s);
   2904 #endif
   2905 		break;
   2906 	case SIOCINITIFADDR:
   2907 		WM_CORE_LOCK(sc);
   2908 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2909 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2910 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2911 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2912 			/* unicast address is first multicast entry */
   2913 			wm_set_filter(sc);
   2914 			error = 0;
   2915 			WM_CORE_UNLOCK(sc);
   2916 			break;
   2917 		}
   2918 		WM_CORE_UNLOCK(sc);
   2919 		/*FALLTHROUGH*/
   2920 	default:
   2921 #ifdef WM_MPSAFE
   2922 		s = splnet();
   2923 #endif
   2924 		/* It may call wm_start, so unlock here */
   2925 		error = ether_ioctl(ifp, cmd, data);
   2926 #ifdef WM_MPSAFE
   2927 		splx(s);
   2928 #endif
   2929 		if (error != ENETRESET)
   2930 			break;
   2931 
   2932 		error = 0;
   2933 
   2934 		if (cmd == SIOCSIFCAP) {
   2935 			error = (*ifp->if_init)(ifp);
   2936 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2937 			;
   2938 		else if (ifp->if_flags & IFF_RUNNING) {
   2939 			/*
   2940 			 * Multicast list has changed; set the hardware filter
   2941 			 * accordingly.
   2942 			 */
   2943 			WM_CORE_LOCK(sc);
   2944 			wm_set_filter(sc);
   2945 			WM_CORE_UNLOCK(sc);
   2946 		}
   2947 		break;
   2948 	}
   2949 
   2950 #ifndef WM_MPSAFE
   2951 	splx(s);
   2952 #endif
   2953 	return error;
   2954 }
   2955 
   2956 /* MAC address related */
   2957 
   2958 /*
 * Get the NVM offset of the alternative MAC address and return it.
 * Return 0 if an error occurred or no valid alternative address exists.
   2961  */
   2962 static uint16_t
   2963 wm_check_alt_mac_addr(struct wm_softc *sc)
   2964 {
   2965 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2966 	uint16_t offset = NVM_OFF_MACADDR;
   2967 
   2968 	/* Try to read alternative MAC address pointer */
   2969 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2970 		return 0;
   2971 
	/* Check whether the pointer is valid. */
   2973 	if ((offset == 0x0000) || (offset == 0xffff))
   2974 		return 0;
   2975 
   2976 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2977 	/*
	 * Check whether the alternative MAC address is valid or not.
	 * Some cards have a non-0xffff pointer but don't actually use
	 * an alternative MAC address.
	 *
	 * A valid unicast address must have the multicast (I/G) bit of
	 * its first octet clear, so check that bit.
   2983 	 */
   2984 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2985 		if (((myea[0] & 0xff) & 0x01) == 0)
   2986 			return offset; /* Found */
   2987 
   2988 	/* Not found */
   2989 	return 0;
   2990 }
   2991 
   2992 static int
   2993 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2994 {
   2995 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2996 	uint16_t offset = NVM_OFF_MACADDR;
   2997 	int do_invert = 0;
   2998 
   2999 	switch (sc->sc_type) {
   3000 	case WM_T_82580:
   3001 	case WM_T_I350:
   3002 	case WM_T_I354:
   3003 		/* EEPROM Top Level Partitioning */
   3004 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3005 		break;
   3006 	case WM_T_82571:
   3007 	case WM_T_82575:
   3008 	case WM_T_82576:
   3009 	case WM_T_80003:
   3010 	case WM_T_I210:
   3011 	case WM_T_I211:
   3012 		offset = wm_check_alt_mac_addr(sc);
   3013 		if (offset == 0)
   3014 			if ((sc->sc_funcid & 0x01) == 1)
   3015 				do_invert = 1;
   3016 		break;
   3017 	default:
   3018 		if ((sc->sc_funcid & 0x01) == 1)
   3019 			do_invert = 1;
   3020 		break;
   3021 	}
   3022 
   3023 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   3024 		myea) != 0)
   3025 		goto bad;
   3026 
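	/* Each 16-bit NVM word holds two MAC bytes, low byte first. */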
   3027 	enaddr[0] = myea[0] & 0xff;
   3028 	enaddr[1] = myea[0] >> 8;
   3029 	enaddr[2] = myea[1] & 0xff;
   3030 	enaddr[3] = myea[1] >> 8;
   3031 	enaddr[4] = myea[2] & 0xff;
   3032 	enaddr[5] = myea[2] >> 8;
   3033 
   3034 	/*
   3035 	 * Toggle the LSB of the MAC address on the second port
   3036 	 * of some dual port cards.
   3037 	 */
   3038 	if (do_invert != 0)
   3039 		enaddr[5] ^= 1;
   3040 
   3041 	return 0;
   3042 
   3043  bad:
   3044 	return -1;
   3045 }
   3046 
   3047 /*
   3048  * wm_set_ral:
   3049  *
   3050  *	Set an entery in the receive address list.
   3051  */
   3052 static void
   3053 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3054 {
   3055 	uint32_t ral_lo, ral_hi;
   3056 
   3057 	if (enaddr != NULL) {
   3058 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3059 		    (enaddr[3] << 24);
   3060 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3061 		ral_hi |= RAL_AV;
   3062 	} else {
   3063 		ral_lo = 0;
   3064 		ral_hi = 0;
   3065 	}
   3066 
   3067 	if (sc->sc_type >= WM_T_82544) {
   3068 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3069 		    ral_lo);
   3070 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3071 		    ral_hi);
   3072 	} else {
   3073 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3074 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3075 	}
   3076 }
   3077 
   3078 /*
   3079  * wm_mchash:
   3080  *
   3081  *	Compute the hash of the multicast address for the 4096-bit
   3082  *	multicast filter.
   3083  */
   3084 static uint32_t
   3085 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3086 {
   3087 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3088 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3089 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3090 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3091 	uint32_t hash;
   3092 
   3093 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3094 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3095 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3096 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3097 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3098 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3099 		return (hash & 0x3ff);
   3100 	}
   3101 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3102 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3103 
   3104 	return (hash & 0xfff);
   3105 }
   3106 
   3107 /*
   3108  * wm_set_filter:
   3109  *
   3110  *	Set up the receive filter.
   3111  */
   3112 static void
   3113 wm_set_filter(struct wm_softc *sc)
   3114 {
   3115 	struct ethercom *ec = &sc->sc_ethercom;
   3116 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3117 	struct ether_multi *enm;
   3118 	struct ether_multistep step;
   3119 	bus_addr_t mta_reg;
   3120 	uint32_t hash, reg, bit;
   3121 	int i, size, ralmax;
   3122 
   3123 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3124 		device_xname(sc->sc_dev), __func__));
   3125 	if (sc->sc_type >= WM_T_82544)
   3126 		mta_reg = WMREG_CORDOVA_MTA;
   3127 	else
   3128 		mta_reg = WMREG_MTA;
   3129 
   3130 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3131 
   3132 	if (ifp->if_flags & IFF_BROADCAST)
   3133 		sc->sc_rctl |= RCTL_BAM;
   3134 	if (ifp->if_flags & IFF_PROMISC) {
   3135 		sc->sc_rctl |= RCTL_UPE;
   3136 		goto allmulti;
   3137 	}
   3138 
   3139 	/*
   3140 	 * Set the station address in the first RAL slot, and
   3141 	 * clear the remaining slots.
   3142 	 */
   3143 	if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3145 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3146 	    || (sc->sc_type == WM_T_PCH))
   3147 		size = WM_RAL_TABSIZE_ICH8;
   3148 	else if (sc->sc_type == WM_T_PCH2)
   3149 		size = WM_RAL_TABSIZE_PCH2;
	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3151 		size = WM_RAL_TABSIZE_PCH_LPT;
   3152 	else if (sc->sc_type == WM_T_82575)
   3153 		size = WM_RAL_TABSIZE_82575;
   3154 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3155 		size = WM_RAL_TABSIZE_82576;
   3156 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3157 		size = WM_RAL_TABSIZE_I350;
   3158 	else
   3159 		size = WM_RAL_TABSIZE;
   3160 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3161 
   3162 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3163 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3164 		switch (i) {
   3165 		case 0:
   3166 			/* We can use all entries */
   3167 			ralmax = size;
   3168 			break;
   3169 		case 1:
   3170 			/* Only RAR[0] */
   3171 			ralmax = 1;
   3172 			break;
   3173 		default:
   3174 			/* available SHRA + RAR[0] */
   3175 			ralmax = i + 1;
   3176 		}
   3177 	} else
   3178 		ralmax = size;
   3179 	for (i = 1; i < size; i++) {
   3180 		if (i < ralmax)
   3181 			wm_set_ral(sc, NULL, i);
   3182 	}
   3183 
   3184 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3185 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3186 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3187 	    || (sc->sc_type == WM_T_PCH_SPT))
   3188 		size = WM_ICH8_MC_TABSIZE;
   3189 	else
   3190 		size = WM_MC_TABSIZE;
   3191 	/* Clear out the multicast table. */
   3192 	for (i = 0; i < size; i++)
   3193 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3194 
   3195 	ETHER_FIRST_MULTI(step, ec, enm);
   3196 	while (enm != NULL) {
   3197 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3198 			/*
   3199 			 * We must listen to a range of multicast addresses.
   3200 			 * For now, just accept all multicasts, rather than
   3201 			 * trying to set only those filter bits needed to match
   3202 			 * the range.  (At this time, the only use of address
   3203 			 * ranges is for IP multicast routing, for which the
   3204 			 * range is big enough to require all bits set.)
   3205 			 */
   3206 			goto allmulti;
   3207 		}
   3208 
   3209 		hash = wm_mchash(sc, enm->enm_addrlo);
   3210 
   3211 		reg = (hash >> 5);
   3212 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3213 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3214 		    || (sc->sc_type == WM_T_PCH2)
   3215 		    || (sc->sc_type == WM_T_PCH_LPT)
   3216 		    || (sc->sc_type == WM_T_PCH_SPT))
   3217 			reg &= 0x1f;
   3218 		else
   3219 			reg &= 0x7f;
   3220 		bit = hash & 0x1f;
   3221 
   3222 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3223 		hash |= 1U << bit;
   3224 
   3225 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3226 			/*
			 * 82544 Errata 9: Certain registers cannot be written
   3228 			 * with particular alignments in PCI-X bus operation
   3229 			 * (FCAH, MTA and VFTA).
   3230 			 */
   3231 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3232 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3233 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3234 		} else
   3235 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3236 
   3237 		ETHER_NEXT_MULTI(step, enm);
   3238 	}
   3239 
   3240 	ifp->if_flags &= ~IFF_ALLMULTI;
   3241 	goto setit;
   3242 
   3243  allmulti:
   3244 	ifp->if_flags |= IFF_ALLMULTI;
   3245 	sc->sc_rctl |= RCTL_MPE;
   3246 
   3247  setit:
   3248 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3249 }
   3250 
   3251 /* Reset and init related */
   3252 
   3253 static void
   3254 wm_set_vlan(struct wm_softc *sc)
   3255 {
   3256 
   3257 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3258 		device_xname(sc->sc_dev), __func__));
   3259 	/* Deal with VLAN enables. */
   3260 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3261 		sc->sc_ctrl |= CTRL_VME;
   3262 	else
   3263 		sc->sc_ctrl &= ~CTRL_VME;
   3264 
   3265 	/* Write the control registers. */
   3266 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3267 }
   3268 
   3269 static void
   3270 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3271 {
   3272 	uint32_t gcr;
   3273 	pcireg_t ctrl2;
   3274 
   3275 	gcr = CSR_READ(sc, WMREG_GCR);
   3276 
	/* Only take action if the timeout value is still the default 0 */
   3278 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3279 		goto out;
   3280 
   3281 	if ((gcr & GCR_CAP_VER2) == 0) {
   3282 		gcr |= GCR_CMPL_TMOUT_10MS;
   3283 		goto out;
   3284 	}
   3285 
   3286 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3287 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3288 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3289 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3290 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3291 
   3292 out:
   3293 	/* Disable completion timeout resend */
   3294 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3295 
   3296 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3297 }
   3298 
   3299 void
   3300 wm_get_auto_rd_done(struct wm_softc *sc)
   3301 {
   3302 	int i;
   3303 
   3304 	/* wait for eeprom to reload */
   3305 	switch (sc->sc_type) {
   3306 	case WM_T_82571:
   3307 	case WM_T_82572:
   3308 	case WM_T_82573:
   3309 	case WM_T_82574:
   3310 	case WM_T_82583:
   3311 	case WM_T_82575:
   3312 	case WM_T_82576:
   3313 	case WM_T_82580:
   3314 	case WM_T_I350:
   3315 	case WM_T_I354:
   3316 	case WM_T_I210:
   3317 	case WM_T_I211:
   3318 	case WM_T_80003:
   3319 	case WM_T_ICH8:
   3320 	case WM_T_ICH9:
   3321 		for (i = 0; i < 10; i++) {
   3322 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3323 				break;
   3324 			delay(1000);
   3325 		}
   3326 		if (i == 10) {
   3327 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3328 			    "complete\n", device_xname(sc->sc_dev));
   3329 		}
   3330 		break;
   3331 	default:
   3332 		break;
   3333 	}
   3334 }
   3335 
   3336 void
   3337 wm_lan_init_done(struct wm_softc *sc)
   3338 {
   3339 	uint32_t reg = 0;
   3340 	int i;
   3341 
	/* Wait for the NVM-driven LAN init to complete */
   3343 	switch (sc->sc_type) {
   3344 	case WM_T_ICH10:
   3345 	case WM_T_PCH:
   3346 	case WM_T_PCH2:
   3347 	case WM_T_PCH_LPT:
   3348 	case WM_T_PCH_SPT:
   3349 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3350 			reg = CSR_READ(sc, WMREG_STATUS);
   3351 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3352 				break;
   3353 			delay(100);
   3354 		}
   3355 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3356 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3357 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3358 		}
   3359 		break;
   3360 	default:
   3361 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3362 		    __func__);
   3363 		break;
   3364 	}
   3365 
   3366 	reg &= ~STATUS_LAN_INIT_DONE;
   3367 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3368 }
   3369 
   3370 void
   3371 wm_get_cfg_done(struct wm_softc *sc)
   3372 {
   3373 	int mask;
   3374 	uint32_t reg;
   3375 	int i;
   3376 
   3377 	/* wait for eeprom to reload */
   3378 	switch (sc->sc_type) {
   3379 	case WM_T_82542_2_0:
   3380 	case WM_T_82542_2_1:
   3381 		/* null */
   3382 		break;
   3383 	case WM_T_82543:
   3384 	case WM_T_82544:
   3385 	case WM_T_82540:
   3386 	case WM_T_82545:
   3387 	case WM_T_82545_3:
   3388 	case WM_T_82546:
   3389 	case WM_T_82546_3:
   3390 	case WM_T_82541:
   3391 	case WM_T_82541_2:
   3392 	case WM_T_82547:
   3393 	case WM_T_82547_2:
   3394 	case WM_T_82573:
   3395 	case WM_T_82574:
   3396 	case WM_T_82583:
   3397 		/* generic */
   3398 		delay(10*1000);
   3399 		break;
   3400 	case WM_T_80003:
   3401 	case WM_T_82571:
   3402 	case WM_T_82572:
   3403 	case WM_T_82575:
   3404 	case WM_T_82576:
   3405 	case WM_T_82580:
   3406 	case WM_T_I350:
   3407 	case WM_T_I354:
   3408 	case WM_T_I210:
   3409 	case WM_T_I211:
   3410 		if (sc->sc_type == WM_T_82571) {
			/* All ports of the 82571 share CFGDONE_0 */
   3412 			mask = EEMNGCTL_CFGDONE_0;
   3413 		} else
   3414 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3415 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3416 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3417 				break;
   3418 			delay(1000);
   3419 		}
   3420 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3421 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3422 				device_xname(sc->sc_dev), __func__));
   3423 		}
   3424 		break;
   3425 	case WM_T_ICH8:
   3426 	case WM_T_ICH9:
   3427 	case WM_T_ICH10:
   3428 	case WM_T_PCH:
   3429 	case WM_T_PCH2:
   3430 	case WM_T_PCH_LPT:
   3431 	case WM_T_PCH_SPT:
   3432 		delay(10*1000);
   3433 		if (sc->sc_type >= WM_T_ICH10)
   3434 			wm_lan_init_done(sc);
   3435 		else
   3436 			wm_get_auto_rd_done(sc);
   3437 
   3438 		reg = CSR_READ(sc, WMREG_STATUS);
   3439 		if ((reg & STATUS_PHYRA) != 0)
   3440 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3441 		break;
   3442 	default:
   3443 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3444 		    __func__);
   3445 		break;
   3446 	}
   3447 }
   3448 
   3449 /* Init hardware bits */
   3450 void
   3451 wm_initialize_hardware_bits(struct wm_softc *sc)
   3452 {
   3453 	uint32_t tarc0, tarc1, reg;
   3454 
   3455 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3456 		device_xname(sc->sc_dev), __func__));
	/* For 82571 variants, 80003 and ICHs */
   3458 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3459 	    || (sc->sc_type >= WM_T_80003)) {
   3460 
   3461 		/* Transmit Descriptor Control 0 */
   3462 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3463 		reg |= TXDCTL_COUNT_DESC;
   3464 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3465 
   3466 		/* Transmit Descriptor Control 1 */
   3467 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3468 		reg |= TXDCTL_COUNT_DESC;
   3469 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3470 
   3471 		/* TARC0 */
   3472 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3473 		switch (sc->sc_type) {
   3474 		case WM_T_82571:
   3475 		case WM_T_82572:
   3476 		case WM_T_82573:
   3477 		case WM_T_82574:
   3478 		case WM_T_82583:
   3479 		case WM_T_80003:
   3480 			/* Clear bits 30..27 */
   3481 			tarc0 &= ~__BITS(30, 27);
   3482 			break;
   3483 		default:
   3484 			break;
   3485 		}
   3486 
   3487 		switch (sc->sc_type) {
   3488 		case WM_T_82571:
   3489 		case WM_T_82572:
   3490 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3491 
   3492 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3493 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3494 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3495 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3497 
   3498 			/* TARC1 bit 28 */
   3499 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3500 				tarc1 &= ~__BIT(28);
   3501 			else
   3502 				tarc1 |= __BIT(28);
   3503 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3504 
   3505 			/*
   3506 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   3508 			 */
   3509 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3510 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3511 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3512 			break;
   3513 		case WM_T_82573:
   3514 		case WM_T_82574:
   3515 		case WM_T_82583:
   3516 			if ((sc->sc_type == WM_T_82574)
   3517 			    || (sc->sc_type == WM_T_82583))
   3518 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3519 
   3520 			/* Extended Device Control */
   3521 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3522 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3523 			reg |= __BIT(22);	/* Set bit 22 */
   3524 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3525 
   3526 			/* Device Control */
   3527 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3528 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3529 
   3530 			/* PCIe Control Register */
   3531 			/*
   3532 			 * 82573 Errata (unknown).
   3533 			 *
   3534 			 * 82574 Errata 25 and 82583 Errata 12
   3535 			 * "Dropped Rx Packets":
			 *   NVM image version 2.1.4 and newer does not
			 *   have this bug.
   3537 			 */
   3538 			reg = CSR_READ(sc, WMREG_GCR);
   3539 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3540 			CSR_WRITE(sc, WMREG_GCR, reg);
   3541 
   3542 			if ((sc->sc_type == WM_T_82574)
   3543 			    || (sc->sc_type == WM_T_82583)) {
   3544 				/*
   3545 				 * Document says this bit must be set for
   3546 				 * proper operation.
   3547 				 */
   3548 				reg = CSR_READ(sc, WMREG_GCR);
   3549 				reg |= __BIT(22);
   3550 				CSR_WRITE(sc, WMREG_GCR, reg);
   3551 
   3552 				/*
				 * Apply a workaround for the hardware
				 * errata documented in the errata sheets:
				 * some PCIe completions are error prone or
				 * unreliable, particularly with ASPM
				 * enabled.  Without this fix, the issue
				 * can cause Tx timeouts.
   3559 				 */
   3560 				reg = CSR_READ(sc, WMREG_GCR2);
   3561 				reg |= __BIT(0);
   3562 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3563 			}
   3564 			break;
   3565 		case WM_T_80003:
   3566 			/* TARC0 */
   3567 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3568 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3570 
   3571 			/* TARC1 bit 28 */
   3572 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3573 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3574 				tarc1 &= ~__BIT(28);
   3575 			else
   3576 				tarc1 |= __BIT(28);
   3577 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3578 			break;
   3579 		case WM_T_ICH8:
   3580 		case WM_T_ICH9:
   3581 		case WM_T_ICH10:
   3582 		case WM_T_PCH:
   3583 		case WM_T_PCH2:
   3584 		case WM_T_PCH_LPT:
   3585 		case WM_T_PCH_SPT:
   3586 			/* TARC0 */
   3587 			if ((sc->sc_type == WM_T_ICH8)
   3588 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3589 				/* Set TARC0 bits 29 and 28 */
   3590 				tarc0 |= __BITS(29, 28);
   3591 			}
   3592 			/* Set TARC0 bits 23,24,26,27 */
   3593 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3594 
   3595 			/* CTRL_EXT */
   3596 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3597 			reg |= __BIT(22);	/* Set bit 22 */
   3598 			/*
   3599 			 * Enable PHY low-power state when MAC is at D3
   3600 			 * w/o WoL
   3601 			 */
   3602 			if (sc->sc_type >= WM_T_PCH)
   3603 				reg |= CTRL_EXT_PHYPDEN;
   3604 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3605 
   3606 			/* TARC1 */
   3607 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3608 			/* bit 28 */
   3609 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3610 				tarc1 &= ~__BIT(28);
   3611 			else
   3612 				tarc1 |= __BIT(28);
   3613 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3614 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3615 
   3616 			/* Device Status */
   3617 			if (sc->sc_type == WM_T_ICH8) {
   3618 				reg = CSR_READ(sc, WMREG_STATUS);
   3619 				reg &= ~__BIT(31);
   3620 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3621 
   3622 			}
   3623 
   3624 			/* IOSFPC */
   3625 			if (sc->sc_type == WM_T_PCH_SPT) {
   3626 				reg = CSR_READ(sc, WMREG_IOSFPC);
				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3628 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3629 			}
   3630 			/*
   3631 			 * Work-around descriptor data corruption issue during
   3632 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3633 			 * capability.
   3634 			 */
   3635 			reg = CSR_READ(sc, WMREG_RFCTL);
   3636 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3637 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3638 			break;
   3639 		default:
   3640 			break;
   3641 		}
   3642 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3643 
   3644 		/*
   3645 		 * 8257[12] Errata No.52 and some others.
   3646 		 * Avoid RSS Hash Value bug.
   3647 		 */
   3648 		switch (sc->sc_type) {
   3649 		case WM_T_82571:
   3650 		case WM_T_82572:
   3651 		case WM_T_82573:
   3652 		case WM_T_80003:
   3653 		case WM_T_ICH8:
   3654 			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3656 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3657 			break;
   3658 		default:
   3659 			break;
   3660 		}
   3661 	}
   3662 }
   3663 
   3664 static uint32_t
   3665 wm_rxpbs_adjust_82580(uint32_t val)
   3666 {
   3667 	uint32_t rv = 0;
   3668 
   3669 	if (val < __arraycount(wm_82580_rxpbs_table))
   3670 		rv = wm_82580_rxpbs_table[val];
   3671 
   3672 	return rv;
   3673 }
   3674 
   3675 /*
   3676  * wm_reset:
   3677  *
   3678  *	Reset the i82542 chip.
   3679  */
   3680 static void
   3681 wm_reset(struct wm_softc *sc)
   3682 {
   3683 	int phy_reset = 0;
   3684 	int i, error = 0;
   3685 	uint32_t reg, mask;
   3686 
   3687 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3688 		device_xname(sc->sc_dev), __func__));
   3689 	/*
   3690 	 * Allocate on-chip memory according to the MTU size.
   3691 	 * The Packet Buffer Allocation register must be written
   3692 	 * before the chip is reset.
   3693 	 */
   3694 	switch (sc->sc_type) {
   3695 	case WM_T_82547:
   3696 	case WM_T_82547_2:
   3697 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3698 		    PBA_22K : PBA_30K;
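		/*
		 * The Rx allocation occupies the bottom of the packet
		 * buffer; the rest of the 40KB buffer becomes the Tx
		 * FIFO (see the PBA_40K arithmetic below).
		 */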
   3699 		for (i = 0; i < sc->sc_nqueues; i++) {
   3700 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3701 			txq->txq_fifo_head = 0;
   3702 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3703 			txq->txq_fifo_size =
   3704 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3705 			txq->txq_fifo_stall = 0;
   3706 		}
   3707 		break;
   3708 	case WM_T_82571:
   3709 	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3711 	case WM_T_80003:
   3712 		sc->sc_pba = PBA_32K;
   3713 		break;
   3714 	case WM_T_82573:
   3715 		sc->sc_pba = PBA_12K;
   3716 		break;
   3717 	case WM_T_82574:
   3718 	case WM_T_82583:
   3719 		sc->sc_pba = PBA_20K;
   3720 		break;
   3721 	case WM_T_82576:
   3722 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3723 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3724 		break;
   3725 	case WM_T_82580:
   3726 	case WM_T_I350:
   3727 	case WM_T_I354:
   3728 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3729 		break;
   3730 	case WM_T_I210:
   3731 	case WM_T_I211:
   3732 		sc->sc_pba = PBA_34K;
   3733 		break;
   3734 	case WM_T_ICH8:
   3735 		/* Workaround for a bit corruption issue in FIFO memory */
   3736 		sc->sc_pba = PBA_8K;
   3737 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3738 		break;
   3739 	case WM_T_ICH9:
   3740 	case WM_T_ICH10:
   3741 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3742 		    PBA_14K : PBA_10K;
   3743 		break;
   3744 	case WM_T_PCH:
   3745 	case WM_T_PCH2:
   3746 	case WM_T_PCH_LPT:
   3747 	case WM_T_PCH_SPT:
   3748 		sc->sc_pba = PBA_26K;
   3749 		break;
   3750 	default:
   3751 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3752 		    PBA_40K : PBA_48K;
   3753 		break;
   3754 	}
   3755 	/*
   3756 	 * Only old or non-multiqueue devices have the PBA register
   3757 	 * XXX Need special handling for 82575.
   3758 	 */
   3759 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3760 	    || (sc->sc_type == WM_T_82575))
   3761 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3762 
   3763 	/* Prevent the PCI-E bus from sticking */
   3764 	if (sc->sc_flags & WM_F_PCIE) {
   3765 		int timeout = 800;
   3766 
   3767 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3768 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3769 
   3770 		while (timeout--) {
   3771 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3772 			    == 0)
   3773 				break;
   3774 			delay(100);
   3775 		}
   3776 	}
   3777 
   3778 	/* Set the completion timeout for interface */
   3779 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3780 	    || (sc->sc_type == WM_T_82580)
   3781 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3782 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3783 		wm_set_pcie_completion_timeout(sc);
   3784 
   3785 	/* Clear interrupt */
   3786 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3787 	if (sc->sc_nintrs > 1) {
   3788 		if (sc->sc_type != WM_T_82574) {
   3789 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3790 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3791 		} else {
   3792 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3793 		}
   3794 	}
   3795 
   3796 	/* Stop the transmit and receive processes. */
   3797 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3798 	sc->sc_rctl &= ~RCTL_EN;
   3799 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3800 	CSR_WRITE_FLUSH(sc);
   3801 
   3802 	/* XXX set_tbi_sbp_82543() */
   3803 
   3804 	delay(10*1000);
   3805 
   3806 	/* Must acquire the MDIO ownership before MAC reset */
   3807 	switch (sc->sc_type) {
   3808 	case WM_T_82573:
   3809 	case WM_T_82574:
   3810 	case WM_T_82583:
   3811 		error = wm_get_hw_semaphore_82573(sc);
   3812 		break;
   3813 	default:
   3814 		break;
   3815 	}
   3816 
   3817 	/*
   3818 	 * 82541 Errata 29? & 82547 Errata 28?
   3819 	 * See also the description about PHY_RST bit in CTRL register
   3820 	 * in 8254x_GBe_SDM.pdf.
   3821 	 */
   3822 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3823 		CSR_WRITE(sc, WMREG_CTRL,
   3824 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3825 		CSR_WRITE_FLUSH(sc);
   3826 		delay(5000);
   3827 	}
   3828 
   3829 	switch (sc->sc_type) {
   3830 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3831 	case WM_T_82541:
   3832 	case WM_T_82541_2:
   3833 	case WM_T_82547:
   3834 	case WM_T_82547_2:
   3835 		/*
   3836 		 * On some chipsets, a reset through a memory-mapped write
   3837 		 * cycle can cause the chip to reset before completing the
   3838 		 * write cycle.  This causes major headache that can be
   3839 		 * avoided by issuing the reset via indirect register writes
   3840 		 * through I/O space.
   3841 		 *
   3842 		 * So, if we successfully mapped the I/O BAR at attach time,
   3843 		 * use that.  Otherwise, try our luck with a memory-mapped
   3844 		 * reset.
   3845 		 */
   3846 		if (sc->sc_flags & WM_F_IOH_VALID)
   3847 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3848 		else
   3849 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3850 		break;
   3851 	case WM_T_82545_3:
   3852 	case WM_T_82546_3:
   3853 		/* Use the shadow control register on these chips. */
   3854 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3855 		break;
   3856 	case WM_T_80003:
   3857 		mask = swfwphysem[sc->sc_funcid];
   3858 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3859 		wm_get_swfw_semaphore(sc, mask);
   3860 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3861 		wm_put_swfw_semaphore(sc, mask);
   3862 		break;
   3863 	case WM_T_ICH8:
   3864 	case WM_T_ICH9:
   3865 	case WM_T_ICH10:
   3866 	case WM_T_PCH:
   3867 	case WM_T_PCH2:
   3868 	case WM_T_PCH_LPT:
   3869 	case WM_T_PCH_SPT:
   3870 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3871 		if (wm_phy_resetisblocked(sc) == false) {
   3872 			/*
   3873 			 * Gate automatic PHY configuration by hardware on
   3874 			 * non-managed 82579
   3875 			 */
   3876 			if ((sc->sc_type == WM_T_PCH2)
   3877 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3878 				== 0))
   3879 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3880 
   3881 			reg |= CTRL_PHY_RESET;
   3882 			phy_reset = 1;
   3883 		} else
   3884 			printf("XXX reset is blocked!!!\n");
   3885 		wm_get_swfwhw_semaphore(sc);
   3886 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during reset */
   3888 		delay(20*1000);
   3889 		wm_put_swfwhw_semaphore(sc);
   3890 		break;
   3891 	case WM_T_82580:
   3892 	case WM_T_I350:
   3893 	case WM_T_I354:
   3894 	case WM_T_I210:
   3895 	case WM_T_I211:
   3896 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3897 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3898 			CSR_WRITE_FLUSH(sc);
   3899 		delay(5000);
   3900 		break;
   3901 	case WM_T_82542_2_0:
   3902 	case WM_T_82542_2_1:
   3903 	case WM_T_82543:
   3904 	case WM_T_82540:
   3905 	case WM_T_82545:
   3906 	case WM_T_82546:
   3907 	case WM_T_82571:
   3908 	case WM_T_82572:
   3909 	case WM_T_82573:
   3910 	case WM_T_82574:
   3911 	case WM_T_82575:
   3912 	case WM_T_82576:
   3913 	case WM_T_82583:
   3914 	default:
   3915 		/* Everything else can safely use the documented method. */
   3916 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3917 		break;
   3918 	}
   3919 
   3920 	/* Must release the MDIO ownership after MAC reset */
   3921 	switch (sc->sc_type) {
   3922 	case WM_T_82573:
   3923 	case WM_T_82574:
   3924 	case WM_T_82583:
   3925 		if (error == 0)
   3926 			wm_put_hw_semaphore_82573(sc);
   3927 		break;
   3928 	default:
   3929 		break;
   3930 	}
   3931 
   3932 	if (phy_reset != 0)
   3933 		wm_get_cfg_done(sc);
   3934 
   3935 	/* reload EEPROM */
   3936 	switch (sc->sc_type) {
   3937 	case WM_T_82542_2_0:
   3938 	case WM_T_82542_2_1:
   3939 	case WM_T_82543:
   3940 	case WM_T_82544:
   3941 		delay(10);
   3942 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3943 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3944 		CSR_WRITE_FLUSH(sc);
   3945 		delay(2000);
   3946 		break;
   3947 	case WM_T_82540:
   3948 	case WM_T_82545:
   3949 	case WM_T_82545_3:
   3950 	case WM_T_82546:
   3951 	case WM_T_82546_3:
   3952 		delay(5*1000);
   3953 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3954 		break;
   3955 	case WM_T_82541:
   3956 	case WM_T_82541_2:
   3957 	case WM_T_82547:
   3958 	case WM_T_82547_2:
   3959 		delay(20000);
   3960 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3961 		break;
   3962 	case WM_T_82571:
   3963 	case WM_T_82572:
   3964 	case WM_T_82573:
   3965 	case WM_T_82574:
   3966 	case WM_T_82583:
   3967 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3968 			delay(10);
   3969 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3970 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3971 			CSR_WRITE_FLUSH(sc);
   3972 		}
   3973 		/* check EECD_EE_AUTORD */
   3974 		wm_get_auto_rd_done(sc);
   3975 		/*
   3976 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   3977 		 * is set.
   3978 		 */
   3979 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3980 		    || (sc->sc_type == WM_T_82583))
   3981 			delay(25*1000);
   3982 		break;
   3983 	case WM_T_82575:
   3984 	case WM_T_82576:
   3985 	case WM_T_82580:
   3986 	case WM_T_I350:
   3987 	case WM_T_I354:
   3988 	case WM_T_I210:
   3989 	case WM_T_I211:
   3990 	case WM_T_80003:
   3991 		/* check EECD_EE_AUTORD */
   3992 		wm_get_auto_rd_done(sc);
   3993 		break;
   3994 	case WM_T_ICH8:
   3995 	case WM_T_ICH9:
   3996 	case WM_T_ICH10:
   3997 	case WM_T_PCH:
   3998 	case WM_T_PCH2:
   3999 	case WM_T_PCH_LPT:
   4000 	case WM_T_PCH_SPT:
   4001 		break;
   4002 	default:
   4003 		panic("%s: unknown type\n", __func__);
   4004 	}
   4005 
   4006 	/* Check whether EEPROM is present or not */
   4007 	switch (sc->sc_type) {
   4008 	case WM_T_82575:
   4009 	case WM_T_82576:
   4010 	case WM_T_82580:
   4011 	case WM_T_I350:
   4012 	case WM_T_I354:
   4013 	case WM_T_ICH8:
   4014 	case WM_T_ICH9:
   4015 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4016 			/* Not found */
   4017 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4018 			if (sc->sc_type == WM_T_82575)
   4019 				wm_reset_init_script_82575(sc);
   4020 		}
   4021 		break;
   4022 	default:
   4023 		break;
   4024 	}
   4025 
   4026 	if ((sc->sc_type == WM_T_82580)
   4027 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4028 		/* clear global device reset status bit */
   4029 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4030 	}
   4031 
   4032 	/* Clear any pending interrupt events. */
   4033 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4034 	reg = CSR_READ(sc, WMREG_ICR);
   4035 	if (sc->sc_nintrs > 1) {
   4036 		if (sc->sc_type != WM_T_82574) {
   4037 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4038 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4039 		} else
   4040 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4041 	}
   4042 
   4043 	/* reload sc_ctrl */
   4044 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4045 
   4046 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4047 		wm_set_eee_i350(sc);
   4048 
   4049 	/* dummy read from WUC */
   4050 	if (sc->sc_type == WM_T_PCH)
   4051 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4052 	/*
   4053 	 * For PCH, this write will make sure that any noise will be detected
   4054 	 * as a CRC error and be dropped rather than show up as a bad packet
   4055 	 * to the DMA engine
   4056 	 */
   4057 	if (sc->sc_type == WM_T_PCH)
   4058 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4059 
   4060 	if (sc->sc_type >= WM_T_82544)
   4061 		CSR_WRITE(sc, WMREG_WUC, 0);
   4062 
   4063 	wm_reset_mdicnfg_82580(sc);
   4064 
   4065 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4066 		wm_pll_workaround_i210(sc);
   4067 }
   4068 
   4069 /*
   4070  * wm_add_rxbuf:
   4071  *
   4072  *	Add a receive buffer to the indiciated descriptor.
   4073  */
   4074 static int
   4075 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4076 {
   4077 	struct wm_softc *sc = rxq->rxq_sc;
   4078 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4079 	struct mbuf *m;
   4080 	int error;
   4081 
   4082 	KASSERT(WM_RX_LOCKED(rxq));
   4083 
   4084 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4085 	if (m == NULL)
   4086 		return ENOBUFS;
   4087 
   4088 	MCLGET(m, M_DONTWAIT);
   4089 	if ((m->m_flags & M_EXT) == 0) {
   4090 		m_freem(m);
   4091 		return ENOBUFS;
   4092 	}
   4093 
   4094 	if (rxs->rxs_mbuf != NULL)
   4095 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4096 
   4097 	rxs->rxs_mbuf = m;
   4098 
   4099 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4100 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4101 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4102 	if (error) {
   4103 		/* XXX XXX XXX */
   4104 		aprint_error_dev(sc->sc_dev,
   4105 		    "unable to load rx DMA map %d, error = %d\n",
   4106 		    idx, error);
   4107 		panic("wm_add_rxbuf");
   4108 	}
   4109 
   4110 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4111 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4112 
   4113 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4114 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4115 			wm_init_rxdesc(rxq, idx);
   4116 	} else
   4117 		wm_init_rxdesc(rxq, idx);
   4118 
   4119 	return 0;
   4120 }
   4121 
   4122 /*
   4123  * wm_rxdrain:
   4124  *
   4125  *	Drain the receive queue.
   4126  */
   4127 static void
   4128 wm_rxdrain(struct wm_rxqueue *rxq)
   4129 {
   4130 	struct wm_softc *sc = rxq->rxq_sc;
   4131 	struct wm_rxsoft *rxs;
   4132 	int i;
   4133 
   4134 	KASSERT(WM_RX_LOCKED(rxq));
   4135 
   4136 	for (i = 0; i < WM_NRXDESC; i++) {
   4137 		rxs = &rxq->rxq_soft[i];
   4138 		if (rxs->rxs_mbuf != NULL) {
   4139 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4140 			m_freem(rxs->rxs_mbuf);
   4141 			rxs->rxs_mbuf = NULL;
   4142 		}
   4143 	}
   4144 }
   4145 
   4146 
   4147 /*
   4148  * XXX copy from FreeBSD's sys/net/rss_config.c
   4149  */
   4150 /*
   4151  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4152  * effectiveness may be limited by algorithm choice and available entropy
   4153  * during the boot.
   4154  *
   4155  * XXXRW: And that we don't randomize it yet!
   4156  *
   4157  * This is the default Microsoft RSS specification key which is also
   4158  * the Chelsio T5 firmware default key.
   4159  */
   4160 #define RSS_KEYSIZE 40
   4161 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4162 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4163 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4164 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4165 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4166 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4167 };
   4168 
   4169 /*
   4170  * Caller must pass an array of size sizeof(rss_key).
   4171  *
   4172  * XXX
 * Since if_ixgbe may also use this function, it should not be an
 * if_wm-specific function.
   4175  */
   4176 static void
   4177 wm_rss_getkey(uint8_t *key)
   4178 {
   4179 
   4180 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4181 }
   4182 
   4183 /*
   4184  * Setup registers for RSS.
   4185  *
   4186  * XXX not yet VMDq support
   4187  */
   4188 static void
   4189 wm_init_rss(struct wm_softc *sc)
   4190 {
   4191 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4192 	int i;
   4193 
   4194 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4195 
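	/*
	 * Fill the redirection table: entry i steers packets whose RSS
	 * hash falls in bucket i to queue (i % sc_nqueues), a simple
	 * round-robin spread across the active queues.
	 */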
   4196 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4197 		int qid, reta_ent;
   4198 
   4199 		qid  = i % sc->sc_nqueues;
		switch (sc->sc_type) {
   4201 		case WM_T_82574:
   4202 			reta_ent = __SHIFTIN(qid,
   4203 			    RETA_ENT_QINDEX_MASK_82574);
   4204 			break;
   4205 		case WM_T_82575:
   4206 			reta_ent = __SHIFTIN(qid,
   4207 			    RETA_ENT_QINDEX1_MASK_82575);
   4208 			break;
   4209 		default:
   4210 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4211 			break;
   4212 		}
   4213 
   4214 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4215 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4216 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4217 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4218 	}
   4219 
   4220 	wm_rss_getkey((uint8_t *)rss_key);
   4221 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4222 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4223 
   4224 	if (sc->sc_type == WM_T_82574)
   4225 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4226 	else
   4227 		mrqc = MRQC_ENABLE_RSS_MQ;
   4228 
	/* XXX
	 * The same as FreeBSD's igb.
	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4232 	 */
   4233 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4234 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4235 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4236 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4237 
   4238 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4239 }
   4240 
   4241 /*
 * Adjust the TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the parameters below.
 *     - The number of hardware queues
 *     - The number of MSI-X vectors (= "nvectors" argument)
 *     - ncpu
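 *
 * For example, an 82576 (16 hardware queues) given 5 MSI-X vectors
 * on a 4-CPU system ends up with min(16, 5 - 1, 4) = 4 queues.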
   4248  */
   4249 static void
   4250 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4251 {
   4252 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4253 
   4254 	if (nvectors < 2) {
   4255 		sc->sc_nqueues = 1;
   4256 		return;
   4257 	}
   4258 
	switch (sc->sc_type) {
   4260 	case WM_T_82572:
   4261 		hw_ntxqueues = 2;
   4262 		hw_nrxqueues = 2;
   4263 		break;
   4264 	case WM_T_82574:
   4265 		hw_ntxqueues = 2;
   4266 		hw_nrxqueues = 2;
   4267 		break;
   4268 	case WM_T_82575:
   4269 		hw_ntxqueues = 4;
   4270 		hw_nrxqueues = 4;
   4271 		break;
   4272 	case WM_T_82576:
   4273 		hw_ntxqueues = 16;
   4274 		hw_nrxqueues = 16;
   4275 		break;
   4276 	case WM_T_82580:
   4277 	case WM_T_I350:
   4278 	case WM_T_I354:
   4279 		hw_ntxqueues = 8;
   4280 		hw_nrxqueues = 8;
   4281 		break;
   4282 	case WM_T_I210:
   4283 		hw_ntxqueues = 4;
   4284 		hw_nrxqueues = 4;
   4285 		break;
   4286 	case WM_T_I211:
   4287 		hw_ntxqueues = 2;
   4288 		hw_nrxqueues = 2;
   4289 		break;
   4290 		/*
		 * Since the Ethernet controllers below do not support
		 * MSI-X, this driver does not use multiqueue on them.
   4293 		 *     - WM_T_80003
   4294 		 *     - WM_T_ICH8
   4295 		 *     - WM_T_ICH9
   4296 		 *     - WM_T_ICH10
   4297 		 *     - WM_T_PCH
   4298 		 *     - WM_T_PCH2
   4299 		 *     - WM_T_PCH_LPT
   4300 		 */
   4301 	default:
   4302 		hw_ntxqueues = 1;
   4303 		hw_nrxqueues = 1;
   4304 		break;
   4305 	}
   4306 
   4307 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4308 
   4309 	/*
	 * Since more queues than MSI-X vectors cannot improve scaling, we
	 * limit the number of queues actually used.
   4312 	 */
   4313 	if (nvectors < hw_nqueues + 1) {
   4314 		sc->sc_nqueues = nvectors - 1;
   4315 	} else {
   4316 		sc->sc_nqueues = hw_nqueues;
   4317 	}
   4318 
   4319 	/*
	 * Since more queues than CPUs cannot improve scaling, we limit
	 * the number of queues actually used.
   4322 	 */
   4323 	if (ncpu < sc->sc_nqueues)
   4324 		sc->sc_nqueues = ncpu;
   4325 }
   4326 
   4327 /*
 * Both single-vector MSI and INTx interrupt setups use this function.
   4329  */
   4330 static int
   4331 wm_setup_legacy(struct wm_softc *sc)
   4332 {
   4333 	pci_chipset_tag_t pc = sc->sc_pc;
   4334 	const char *intrstr = NULL;
   4335 	char intrbuf[PCI_INTRSTR_LEN];
   4336 	int error;
   4337 
   4338 	error = wm_alloc_txrx_queues(sc);
   4339 	if (error) {
   4340 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4341 		    error);
   4342 		return ENOMEM;
   4343 	}
   4344 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4345 	    sizeof(intrbuf));
   4346 #ifdef WM_MPSAFE
   4347 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4348 #endif
   4349 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4350 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4351 	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4353 		    (pci_intr_type(sc->sc_intrs[0])
   4354 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4355 		return ENOMEM;
   4356 	}
   4357 
   4358 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4359 	sc->sc_nintrs = 1;
   4360 	return 0;
   4361 }
   4362 
   4363 static int
   4364 wm_setup_msix(struct wm_softc *sc)
   4365 {
   4366 	void *vih;
   4367 	kcpuset_t *affinity;
   4368 	int qidx, error, intr_idx, txrx_established;
   4369 	pci_chipset_tag_t pc = sc->sc_pc;
   4370 	const char *intrstr = NULL;
   4371 	char intrbuf[PCI_INTRSTR_LEN];
   4372 	char intr_xname[INTRDEVNAMEBUF];
   4373 
   4374 	if (sc->sc_nqueues < ncpu) {
   4375 		/*
		 * To avoid other devices' interrupts, the affinity of the
		 * Tx/Rx interrupts starts from CPU#1.
   4378 		 */
   4379 		sc->sc_affinity_offset = 1;
   4380 	} else {
   4381 		/*
		 * In this case the device uses all CPUs, so we make the
		 * affinity cpu_index equal to the MSI-X vector number for
		 * readability.
   4384 		 */
   4385 		sc->sc_affinity_offset = 0;
   4386 	}
   4387 
   4388 	error = wm_alloc_txrx_queues(sc);
   4389 	if (error) {
   4390 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4391 		    error);
   4392 		return ENOMEM;
   4393 	}
   4394 
   4395 	kcpuset_create(&affinity, false);
   4396 	intr_idx = 0;
   4397 
   4398 	/*
   4399 	 * TX and RX
   4400 	 */
   4401 	txrx_established = 0;
   4402 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4403 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4404 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4405 
   4406 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4407 		    sizeof(intrbuf));
   4408 #ifdef WM_MPSAFE
   4409 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4410 		    PCI_INTR_MPSAFE, true);
   4411 #endif
   4412 		memset(intr_xname, 0, sizeof(intr_xname));
   4413 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4414 		    device_xname(sc->sc_dev), qidx);
   4415 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4416 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4417 		if (vih == NULL) {
   4418 			aprint_error_dev(sc->sc_dev,
   4419 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4420 			    intrstr ? " at " : "",
   4421 			    intrstr ? intrstr : "");
   4422 
   4423 			goto fail;
   4424 		}
   4425 		kcpuset_zero(affinity);
   4426 		/* Round-robin affinity */
   4427 		kcpuset_set(affinity, affinity_to);
   4428 		error = interrupt_distribute(vih, affinity, NULL);
   4429 		if (error == 0) {
   4430 			aprint_normal_dev(sc->sc_dev,
   4431 			    "for TX and RX interrupting at %s affinity to %u\n",
   4432 			    intrstr, affinity_to);
   4433 		} else {
   4434 			aprint_normal_dev(sc->sc_dev,
   4435 			    "for TX and RX interrupting at %s\n", intrstr);
   4436 		}
   4437 		sc->sc_ihs[intr_idx] = vih;
		wmq->wmq_id = qidx;
   4439 		wmq->wmq_intr_idx = intr_idx;
   4440 
   4441 		txrx_established++;
   4442 		intr_idx++;
   4443 	}
   4444 
   4445 	/*
   4446 	 * LINK
   4447 	 */
   4448 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4449 	    sizeof(intrbuf));
   4450 #ifdef WM_MPSAFE
   4451 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4452 #endif
   4453 	memset(intr_xname, 0, sizeof(intr_xname));
   4454 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4455 	    device_xname(sc->sc_dev));
   4456 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4457 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4458 	if (vih == NULL) {
   4459 		aprint_error_dev(sc->sc_dev,
   4460 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4461 		    intrstr ? " at " : "",
   4462 		    intrstr ? intrstr : "");
   4463 
   4464 		goto fail;
   4465 	}
   4466 	/* keep default affinity to LINK interrupt */
   4467 	aprint_normal_dev(sc->sc_dev,
   4468 	    "for LINK interrupting at %s\n", intrstr);
   4469 	sc->sc_ihs[intr_idx] = vih;
   4470 	sc->sc_link_intr_idx = intr_idx;
   4471 
   4472 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4473 	kcpuset_destroy(affinity);
   4474 	return 0;
   4475 
   4476  fail:
   4477 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4478 		struct wm_queue *wmq = &sc->sc_queue[qidx];
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4480 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4481 	}
   4482 
   4483 	kcpuset_destroy(affinity);
   4484 	return ENOMEM;
   4485 }
   4486 
   4487 /*
   4488  * wm_init:		[ifnet interface function]
   4489  *
   4490  *	Initialize the interface.
   4491  */
   4492 static int
   4493 wm_init(struct ifnet *ifp)
   4494 {
   4495 	struct wm_softc *sc = ifp->if_softc;
   4496 	int ret;
   4497 
   4498 	WM_CORE_LOCK(sc);
   4499 	ret = wm_init_locked(ifp);
   4500 	WM_CORE_UNLOCK(sc);
   4501 
   4502 	return ret;
   4503 }
   4504 
   4505 static int
   4506 wm_init_locked(struct ifnet *ifp)
   4507 {
   4508 	struct wm_softc *sc = ifp->if_softc;
   4509 	int i, j, trynum, error = 0;
   4510 	uint32_t reg;
   4511 
   4512 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4513 		device_xname(sc->sc_dev), __func__));
   4514 	KASSERT(WM_CORE_LOCKED(sc));
   4515 	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
   4518 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4519 	 * on such platforms.  One possibility is that the DMA itself is
   4520 	 * slightly more efficient if the front of the entire packet (instead
   4521 	 * of the front of the headers) is aligned.
   4522 	 *
   4523 	 * Note we must always set align_tweak to 0 if we are using
   4524 	 * jumbo frames.
   4525 	 */
   4526 #ifdef __NO_STRICT_ALIGNMENT
   4527 	sc->sc_align_tweak = 0;
   4528 #else
   4529 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4530 		sc->sc_align_tweak = 0;
   4531 	else
   4532 		sc->sc_align_tweak = 2;
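	/*
	 * With the default 1500-byte MTU, 1500 + ETHER_HDR_LEN +
	 * ETHER_CRC_LEN = 1518 <= MCLBYTES - 2 for the usual 2KB
	 * clusters, so the 2-byte tweak applies and the IP header
	 * ends up 32-bit aligned.
	 */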
   4533 #endif /* __NO_STRICT_ALIGNMENT */
   4534 
   4535 	/* Cancel any pending I/O. */
   4536 	wm_stop_locked(ifp, 0);
   4537 
   4538 	/* update statistics before reset */
   4539 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4540 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4541 
   4542 	/* Reset the chip to a known state. */
   4543 	wm_reset(sc);
   4544 
   4545 	switch (sc->sc_type) {
   4546 	case WM_T_82571:
   4547 	case WM_T_82572:
   4548 	case WM_T_82573:
   4549 	case WM_T_82574:
   4550 	case WM_T_82583:
   4551 	case WM_T_80003:
   4552 	case WM_T_ICH8:
   4553 	case WM_T_ICH9:
   4554 	case WM_T_ICH10:
   4555 	case WM_T_PCH:
   4556 	case WM_T_PCH2:
   4557 	case WM_T_PCH_LPT:
   4558 	case WM_T_PCH_SPT:
   4559 		/* AMT based hardware can now take control from firmware */
   4560 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4561 			wm_get_hw_control(sc);
   4562 		break;
   4563 	default:
   4564 		break;
   4565 	}
   4566 
   4567 	/* Init hardware bits */
   4568 	wm_initialize_hardware_bits(sc);
   4569 
   4570 	/* Reset the PHY. */
   4571 	if (sc->sc_flags & WM_F_HAS_MII)
   4572 		wm_gmii_reset(sc);
   4573 
   4574 	/* Calculate (E)ITR value */
   4575 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4576 		sc->sc_itr = 450;	/* For EITR */
   4577 	} else if (sc->sc_type >= WM_T_82543) {
   4578 		/*
   4579 		 * Set up the interrupt throttling register (units of 256ns)
   4580 		 * Note that a footnote in Intel's documentation says this
   4581 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4582 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4583 		 * that that is also true for the 1024ns units of the other
   4584 		 * interrupt-related timer registers -- so, really, we ought
   4585 		 * to divide this value by 4 when the link speed is low.
   4586 		 *
   4587 		 * XXX implement this division at link speed change!
   4588 		 */
   4589 
   4590 		/*
   4591 		 * For N interrupts/sec, set this value to:
   4592 		 * 1000000000 / (N * 256).  Note that we set the
   4593 		 * absolute and packet timer values to this value
   4594 		 * divided by 4 to get "simple timer" behavior.
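		 *
		 * Example: sc_itr = 1500 corresponds to
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec.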
   4595 		 */
   4596 
   4597 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4598 	}
   4599 
   4600 	error = wm_init_txrx_queues(sc);
   4601 	if (error)
   4602 		goto out;
   4603 
   4604 	/*
   4605 	 * Clear out the VLAN table -- we don't use it (yet).
   4606 	 */
   4607 	CSR_WRITE(sc, WMREG_VET, 0);
   4608 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4609 		trynum = 10; /* Due to hw errata */
   4610 	else
   4611 		trynum = 1;
   4612 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4613 		for (j = 0; j < trynum; j++)
   4614 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4615 
   4616 	/*
   4617 	 * Set up flow-control parameters.
   4618 	 *
   4619 	 * XXX Values could probably stand some tuning.
   4620 	 */
   4621 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4622 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4623 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4624 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4625 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4626 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4627 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4628 	}
   4629 
   4630 	sc->sc_fcrtl = FCRTL_DFLT;
   4631 	if (sc->sc_type < WM_T_82543) {
   4632 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4633 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4634 	} else {
   4635 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4636 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4637 	}
   4638 
   4639 	if (sc->sc_type == WM_T_80003)
   4640 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4641 	else
   4642 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4643 
   4644 	/* Writes the control register. */
   4645 	wm_set_vlan(sc);
   4646 
   4647 	if (sc->sc_flags & WM_F_HAS_MII) {
   4648 		int val;
   4649 
   4650 		switch (sc->sc_type) {
   4651 		case WM_T_80003:
   4652 		case WM_T_ICH8:
   4653 		case WM_T_ICH9:
   4654 		case WM_T_ICH10:
   4655 		case WM_T_PCH:
   4656 		case WM_T_PCH2:
   4657 		case WM_T_PCH_LPT:
   4658 		case WM_T_PCH_SPT:
   4659 			/*
   4660 			 * Set the mac to wait the maximum time between each
   4661 			 * iteration and increase the max iterations when
   4662 			 * polling the phy; this fixes erroneous timeouts at
   4663 			 * 10Mbps.
   4664 			 */
   4665 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4666 			    0xFFFF);
   4667 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4668 			val |= 0x3F;
   4669 			wm_kmrn_writereg(sc,
   4670 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4671 			break;
   4672 		default:
   4673 			break;
   4674 		}
   4675 
   4676 		if (sc->sc_type == WM_T_80003) {
   4677 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4678 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4679 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4680 
    4681 			/* Bypass RX and TX FIFOs */
   4682 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4683 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4684 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4685 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4686 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4687 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4688 		}
   4689 	}
   4690 #if 0
   4691 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4692 #endif
   4693 
   4694 	/* Set up checksum offload parameters. */
   4695 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4696 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4697 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4698 		reg |= RXCSUM_IPOFL;
   4699 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4700 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4701 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4702 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4703 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4704 
   4705 	/* Set up MSI-X */
   4706 	if (sc->sc_nintrs > 1) {
   4707 		uint32_t ivar;
   4708 		struct wm_queue *wmq;
   4709 		int qid, qintr_idx;
   4710 
   4711 		if (sc->sc_type == WM_T_82575) {
   4712 			/* Interrupt control */
   4713 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4714 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4715 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4716 
   4717 			/* TX and RX */
   4718 			for (i = 0; i < sc->sc_nqueues; i++) {
   4719 				wmq = &sc->sc_queue[i];
   4720 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4721 				    EITR_TX_QUEUE(wmq->wmq_id)
   4722 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4723 			}
   4724 			/* Link status */
   4725 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4726 			    EITR_OTHER);
   4727 		} else if (sc->sc_type == WM_T_82574) {
   4728 			/* Interrupt control */
   4729 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4730 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4731 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4732 
   4733 			ivar = 0;
   4734 			/* TX and RX */
   4735 			for (i = 0; i < sc->sc_nqueues; i++) {
   4736 				wmq = &sc->sc_queue[i];
   4737 				qid = wmq->wmq_id;
   4738 				qintr_idx = wmq->wmq_intr_idx;
   4739 
   4740 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4741 				    IVAR_TX_MASK_Q_82574(qid));
   4742 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4743 				    IVAR_RX_MASK_Q_82574(qid));
   4744 			}
   4745 			/* Link status */
   4746 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4747 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4748 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4749 		} else {
   4750 			/* Interrupt control */
   4751 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4752 			    | GPIE_EIAME | GPIE_PBA);
   4753 
   4754 			switch (sc->sc_type) {
   4755 			case WM_T_82580:
   4756 			case WM_T_I350:
   4757 			case WM_T_I354:
   4758 			case WM_T_I210:
   4759 			case WM_T_I211:
   4760 				/* TX and RX */
   4761 				for (i = 0; i < sc->sc_nqueues; i++) {
   4762 					wmq = &sc->sc_queue[i];
   4763 					qid = wmq->wmq_id;
   4764 					qintr_idx = wmq->wmq_intr_idx;
   4765 
   4766 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4767 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4768 					ivar |= __SHIFTIN((qintr_idx
   4769 						| IVAR_VALID),
   4770 					    IVAR_TX_MASK_Q(qid));
   4771 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4772 					ivar |= __SHIFTIN((qintr_idx
   4773 						| IVAR_VALID),
   4774 					    IVAR_RX_MASK_Q(qid));
   4775 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4776 				}
   4777 				break;
   4778 			case WM_T_82576:
   4779 				/* TX and RX */
   4780 				for (i = 0; i < sc->sc_nqueues; i++) {
   4781 					wmq = &sc->sc_queue[i];
   4782 					qid = wmq->wmq_id;
   4783 					qintr_idx = wmq->wmq_intr_idx;
   4784 
   4785 					ivar = CSR_READ(sc,
   4786 					    WMREG_IVAR_Q_82576(qid));
   4787 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4788 					ivar |= __SHIFTIN((qintr_idx
   4789 						| IVAR_VALID),
   4790 					    IVAR_TX_MASK_Q_82576(qid));
   4791 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4792 					ivar |= __SHIFTIN((qintr_idx
   4793 						| IVAR_VALID),
   4794 					    IVAR_RX_MASK_Q_82576(qid));
   4795 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4796 					    ivar);
   4797 				}
   4798 				break;
   4799 			default:
   4800 				break;
   4801 			}
   4802 
   4803 			/* Link status */
   4804 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4805 			    IVAR_MISC_OTHER);
   4806 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4807 		}
   4808 
   4809 		if (sc->sc_nqueues > 1) {
   4810 			wm_init_rss(sc);
   4811 
    4812 			/*
    4813 			 * NOTE: Receive Full-Packet Checksum Offload is
    4814 			 * mutually exclusive with multiqueue operation.
    4815 			 * This does not affect the ordinary TCP/IP checksum
    4816 			 * offloads, which still work.
    4817 			 */
   4818 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4819 			reg |= RXCSUM_PCSD;
   4820 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4821 		}
   4822 	}
   4823 
   4824 	/* Set up the interrupt registers. */
   4825 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4826 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4827 	    ICR_RXO | ICR_RXT0;
   4828 	if (sc->sc_nintrs > 1) {
   4829 		uint32_t mask;
   4830 		struct wm_queue *wmq;
   4831 
   4832 		switch (sc->sc_type) {
   4833 		case WM_T_82574:
   4834 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4835 			    WMREG_EIAC_82574_MSIX_MASK);
   4836 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4837 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4838 			break;
   4839 		default:
   4840 			if (sc->sc_type == WM_T_82575) {
   4841 				mask = 0;
   4842 				for (i = 0; i < sc->sc_nqueues; i++) {
   4843 					wmq = &sc->sc_queue[i];
   4844 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   4845 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   4846 				}
   4847 				mask |= EITR_OTHER;
   4848 			} else {
   4849 				mask = 0;
   4850 				for (i = 0; i < sc->sc_nqueues; i++) {
   4851 					wmq = &sc->sc_queue[i];
   4852 					mask |= 1 << wmq->wmq_intr_idx;
   4853 				}
   4854 				mask |= 1 << sc->sc_link_intr_idx;
   4855 			}
   4856 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4857 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4858 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4859 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4860 			break;
   4861 		}
   4862 	} else
   4863 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4864 
   4865 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4866 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4867 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4868 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4869 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4870 		reg |= KABGTXD_BGSQLBIAS;
   4871 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4872 	}
   4873 
   4874 	/* Set up the inter-packet gap. */
   4875 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4876 
   4877 	if (sc->sc_type >= WM_T_82543) {
   4878 		/*
    4879 		 * XXX 82574 has both ITR and EITR.  Set EITR when we use
    4880 		 * the multiqueue function with MSI-X.
   4881 		 */
   4882 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4883 			int qidx;
   4884 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4885 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   4886 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   4887 				    sc->sc_itr);
   4888 			}
    4889 			/*
    4890 			 * Link interrupts occur much less frequently than
    4891 			 * TX and RX interrupts, so we don't tune the
    4892 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    4893 			 * FreeBSD's if_igb does.
    4894 			 */
   4895 		} else
   4896 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4897 	}
   4898 
   4899 	/* Set the VLAN ethernetype. */
   4900 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4901 
   4902 	/*
   4903 	 * Set up the transmit control register; we start out with
    4904 	 * a collision distance suitable for FDX, but update it when
   4905 	 * we resolve the media type.
   4906 	 */
   4907 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4908 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4909 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4910 	if (sc->sc_type >= WM_T_82571)
   4911 		sc->sc_tctl |= TCTL_MULR;
   4912 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4913 
   4914 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    4915 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   4916 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4917 	}
   4918 
   4919 	if (sc->sc_type == WM_T_80003) {
   4920 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4921 		reg &= ~TCTL_EXT_GCEX_MASK;
   4922 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4923 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4924 	}
   4925 
   4926 	/* Set the media. */
   4927 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4928 		goto out;
   4929 
   4930 	/* Configure for OS presence */
   4931 	wm_init_manageability(sc);
   4932 
   4933 	/*
   4934 	 * Set up the receive control register; we actually program
   4935 	 * the register when we set the receive filter.  Use multicast
   4936 	 * address offset type 0.
   4937 	 *
   4938 	 * Only the i82544 has the ability to strip the incoming
   4939 	 * CRC, so we don't enable that feature.
   4940 	 */
   4941 	sc->sc_mchash_type = 0;
   4942 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4943 	    | RCTL_MO(sc->sc_mchash_type);
   4944 
   4945 	/*
   4946 	 * The I350 has a bug where it always strips the CRC whether
    4947 	 * asked to or not.  So ask for stripped CRC here and cope in rxeof.
   4948 	 */
   4949 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4950 	    || (sc->sc_type == WM_T_I210))
   4951 		sc->sc_rctl |= RCTL_SECRC;
   4952 
   4953 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4954 	    && (ifp->if_mtu > ETHERMTU)) {
   4955 		sc->sc_rctl |= RCTL_LPE;
   4956 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4957 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4958 	}
   4959 
   4960 	if (MCLBYTES == 2048) {
   4961 		sc->sc_rctl |= RCTL_2k;
   4962 	} else {
   4963 		if (sc->sc_type >= WM_T_82543) {
   4964 			switch (MCLBYTES) {
   4965 			case 4096:
   4966 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4967 				break;
   4968 			case 8192:
   4969 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4970 				break;
   4971 			case 16384:
   4972 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4973 				break;
   4974 			default:
   4975 				panic("wm_init: MCLBYTES %d unsupported",
   4976 				    MCLBYTES);
   4977 				break;
   4978 			}
   4979 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   4980 	}
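         	/*
         	 * Summary (not in the original source): without RCTL_BSEX the
         	 * chip assumes 2k receive buffers; RCTL_BSEX selects the
         	 * extended encodings (4k/8k/16k).  Any other cluster size
         	 * cannot be described to the hardware, hence the panics above.
         	 */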
   4981 
   4982 	/* Set the receive filter. */
   4983 	wm_set_filter(sc);
   4984 
   4985 	/* Enable ECC */
   4986 	switch (sc->sc_type) {
   4987 	case WM_T_82571:
   4988 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4989 		reg |= PBA_ECC_CORR_EN;
   4990 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4991 		break;
   4992 	case WM_T_PCH_LPT:
   4993 	case WM_T_PCH_SPT:
   4994 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4995 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4996 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4997 
   4998 		reg = CSR_READ(sc, WMREG_CTRL);
   4999 		reg |= CTRL_MEHE;
   5000 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5001 		break;
   5002 	default:
   5003 		break;
   5004 	}
   5005 
    5006 	/* On 82575 and later, set RDT only after RX is enabled */
   5007 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5008 		int qidx;
   5009 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5010 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5011 			for (i = 0; i < WM_NRXDESC; i++) {
   5012 				WM_RX_LOCK(rxq);
   5013 				wm_init_rxdesc(rxq, i);
   5014 				WM_RX_UNLOCK(rxq);
    5016 			}
   5017 		}
   5018 	}
   5019 
   5020 	sc->sc_stopping = false;
   5021 
   5022 	/* Start the one second link check clock. */
   5023 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5024 
   5025 	/* ...all done! */
   5026 	ifp->if_flags |= IFF_RUNNING;
   5027 	ifp->if_flags &= ~IFF_OACTIVE;
   5028 
   5029  out:
   5030 	sc->sc_if_flags = ifp->if_flags;
   5031 	if (error)
   5032 		log(LOG_ERR, "%s: interface not running\n",
   5033 		    device_xname(sc->sc_dev));
   5034 	return error;
   5035 }
   5036 
   5037 /*
   5038  * wm_stop:		[ifnet interface function]
   5039  *
   5040  *	Stop transmission on the interface.
   5041  */
   5042 static void
   5043 wm_stop(struct ifnet *ifp, int disable)
   5044 {
   5045 	struct wm_softc *sc = ifp->if_softc;
   5046 
   5047 	WM_CORE_LOCK(sc);
   5048 	wm_stop_locked(ifp, disable);
   5049 	WM_CORE_UNLOCK(sc);
   5050 }
   5051 
   5052 static void
   5053 wm_stop_locked(struct ifnet *ifp, int disable)
   5054 {
   5055 	struct wm_softc *sc = ifp->if_softc;
   5056 	struct wm_txsoft *txs;
   5057 	int i, qidx;
   5058 
   5059 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5060 		device_xname(sc->sc_dev), __func__));
   5061 	KASSERT(WM_CORE_LOCKED(sc));
   5062 
   5063 	sc->sc_stopping = true;
   5064 
   5065 	/* Stop the one second clock. */
   5066 	callout_stop(&sc->sc_tick_ch);
   5067 
   5068 	/* Stop the 82547 Tx FIFO stall check timer. */
   5069 	if (sc->sc_type == WM_T_82547)
   5070 		callout_stop(&sc->sc_txfifo_ch);
   5071 
   5072 	if (sc->sc_flags & WM_F_HAS_MII) {
   5073 		/* Down the MII. */
   5074 		mii_down(&sc->sc_mii);
   5075 	} else {
   5076 #if 0
   5077 		/* Should we clear PHY's status properly? */
   5078 		wm_reset(sc);
   5079 #endif
   5080 	}
   5081 
   5082 	/* Stop the transmit and receive processes. */
   5083 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5084 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5085 	sc->sc_rctl &= ~RCTL_EN;
   5086 
   5087 	/*
   5088 	 * Clear the interrupt mask to ensure the device cannot assert its
   5089 	 * interrupt line.
   5090 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5091 	 * service any currently pending or shared interrupt.
   5092 	 */
   5093 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5094 	sc->sc_icr = 0;
   5095 	if (sc->sc_nintrs > 1) {
   5096 		if (sc->sc_type != WM_T_82574) {
   5097 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5098 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5099 		} else
   5100 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5101 	}
   5102 
   5103 	/* Release any queued transmit buffers. */
   5104 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5105 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5106 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5107 		WM_TX_LOCK(txq);
   5108 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5109 			txs = &txq->txq_soft[i];
   5110 			if (txs->txs_mbuf != NULL) {
    5111 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5112 				m_freem(txs->txs_mbuf);
   5113 				txs->txs_mbuf = NULL;
   5114 			}
   5115 		}
   5116 		if (sc->sc_type == WM_T_PCH_SPT) {
   5117 			pcireg_t preg;
   5118 			uint32_t reg;
   5119 			int nexttx;
   5120 
   5121 			/* First, disable MULR fix in FEXTNVM11 */
   5122 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5123 			reg |= FEXTNVM11_DIS_MULRFIX;
   5124 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5125 
   5126 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5127 			    WM_PCI_DESCRING_STATUS);
   5128 			reg = CSR_READ(sc, WMREG_TDLEN(0));
   5129 			printf("XXX RST: FLUSH = %08x, len = %u\n",
   5130 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
   5131 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
   5132 			    && (reg != 0)) {
   5133 				/* TX */
   5134 				printf("XXX need TX flush (reg = %08x)\n",
   5135 				    preg);
   5136 				wm_init_tx_descs(sc, txq);
   5137 				wm_init_tx_regs(sc, wmq, txq);
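         				/*
         				 * Sketch of what follows (added note):
         				 * queue one dummy 512-byte descriptor
         				 * and briefly re-enable the transmitter
         				 * so the pending descriptor flush can
         				 * complete before disabling TX again.
         				 */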
   5138 				nexttx = txq->txq_next;
   5139 				wm_set_dma_addr(
   5140 					&txq->txq_descs[nexttx].wtx_addr,
   5141 					WM_CDTXADDR(txq, nexttx));
   5142 				txq->txq_descs[nexttx].wtx_cmdlen
   5143 				    = htole32(WTX_CMD_IFCS | 512);
   5144 				wm_cdtxsync(txq, nexttx, 1,
   5145 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
   5146 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5147 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5148 				CSR_WRITE_FLUSH(sc);
   5149 				delay(250);
   5150 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5151 			}
   5152 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5153 			    WM_PCI_DESCRING_STATUS);
   5154 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5155 				/* RX */
   5156 				printf("XXX need RX flush\n");
   5157 			}
   5158 		}
   5159 		WM_TX_UNLOCK(txq);
   5160 	}
   5161 
   5162 	/* Mark the interface as down and cancel the watchdog timer. */
   5163 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5164 	ifp->if_timer = 0;
   5165 
   5166 	if (disable) {
   5167 		for (i = 0; i < sc->sc_nqueues; i++) {
   5168 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5169 			WM_RX_LOCK(rxq);
   5170 			wm_rxdrain(rxq);
   5171 			WM_RX_UNLOCK(rxq);
   5172 		}
   5173 	}
   5174 
   5175 #if 0 /* notyet */
   5176 	if (sc->sc_type >= WM_T_82544)
   5177 		CSR_WRITE(sc, WMREG_WUC, 0);
   5178 #endif
   5179 }
   5180 
   5181 static void
   5182 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5183 {
   5184 	struct mbuf *m;
   5185 	int i;
   5186 
   5187 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5188 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5189 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5190 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5191 		    m->m_data, m->m_len, m->m_flags);
   5192 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5193 	    i, i == 1 ? "" : "s");
   5194 }
   5195 
   5196 /*
   5197  * wm_82547_txfifo_stall:
   5198  *
   5199  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5200  *	reset the FIFO pointers, and restart packet transmission.
   5201  */
   5202 static void
   5203 wm_82547_txfifo_stall(void *arg)
   5204 {
   5205 	struct wm_softc *sc = arg;
   5206 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5207 #ifndef WM_MPSAFE
   5208 	int s;
   5209 
   5210 	s = splnet();
   5211 #endif
   5212 	WM_TX_LOCK(txq);
   5213 
   5214 	if (sc->sc_stopping)
   5215 		goto out;
   5216 
   5217 	if (txq->txq_fifo_stall) {
   5218 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5219 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5220 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5221 			/*
   5222 			 * Packets have drained.  Stop transmitter, reset
   5223 			 * FIFO pointers, restart transmitter, and kick
   5224 			 * the packet queue.
   5225 			 */
   5226 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5227 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5228 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5229 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5230 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5231 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5232 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5233 			CSR_WRITE_FLUSH(sc);
   5234 
   5235 			txq->txq_fifo_head = 0;
   5236 			txq->txq_fifo_stall = 0;
   5237 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5238 		} else {
   5239 			/*
   5240 			 * Still waiting for packets to drain; try again in
   5241 			 * another tick.
   5242 			 */
   5243 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5244 		}
   5245 	}
   5246 
   5247 out:
   5248 	WM_TX_UNLOCK(txq);
   5249 #ifndef WM_MPSAFE
   5250 	splx(s);
   5251 #endif
   5252 }
   5253 
   5254 /*
   5255  * wm_82547_txfifo_bugchk:
   5256  *
   5257  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5258  *	prevent enqueueing a packet that would wrap around the end
    5259 	 *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   5260  *
   5261  *	We do this by checking the amount of space before the end
   5262  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5263  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5264  *	the internal FIFO pointers to the beginning, and restart
   5265  *	transmission on the interface.
   5266  */
   5267 #define	WM_FIFO_HDR		0x10
   5268 #define	WM_82547_PAD_LEN	0x3e0
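         /*
          * Worked example (illustrative numbers only): with WM_FIFO_HDR = 0x10
          * and WM_82547_PAD_LEN = 0x3e0, a 1514-byte frame occupies
          * len = roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO.  In
          * half-duplex mode the check below stalls transmission whenever
          * len >= 0x3e0 + space, i.e. when 544 bytes or fewer remain before
          * the end of the FIFO, until the FIFO drains and is reset.
          */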
   5269 static int
   5270 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5271 {
   5272 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5273 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5274 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5275 
   5276 	/* Just return if already stalled. */
   5277 	if (txq->txq_fifo_stall)
   5278 		return 1;
   5279 
   5280 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5281 		/* Stall only occurs in half-duplex mode. */
   5282 		goto send_packet;
   5283 	}
   5284 
   5285 	if (len >= WM_82547_PAD_LEN + space) {
   5286 		txq->txq_fifo_stall = 1;
   5287 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5288 		return 1;
   5289 	}
   5290 
   5291  send_packet:
   5292 	txq->txq_fifo_head += len;
   5293 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5294 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5295 
   5296 	return 0;
   5297 }
   5298 
   5299 static int
   5300 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5301 {
   5302 	int error;
   5303 
   5304 	/*
   5305 	 * Allocate the control data structures, and create and load the
   5306 	 * DMA map for it.
   5307 	 *
   5308 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5309 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5310 	 * both sets within the same 4G segment.
   5311 	 */
   5312 	if (sc->sc_type < WM_T_82544)
   5313 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5314 	else
   5315 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5316 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5317 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5318 	else
   5319 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5320 
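         	/*
         	 * Note (added): the 0x100000000ULL boundary argument below is
         	 * what enforces the "same 4G segment" requirement described
         	 * above; bus_dmamem_alloc(9) will not return memory that
         	 * crosses a 4GB boundary.
         	 */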
   5321 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5322 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5323 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5324 		aprint_error_dev(sc->sc_dev,
   5325 		    "unable to allocate TX control data, error = %d\n",
   5326 		    error);
   5327 		goto fail_0;
   5328 	}
   5329 
   5330 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5331 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5332 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5333 		aprint_error_dev(sc->sc_dev,
   5334 		    "unable to map TX control data, error = %d\n", error);
   5335 		goto fail_1;
   5336 	}
   5337 
   5338 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5339 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5340 		aprint_error_dev(sc->sc_dev,
   5341 		    "unable to create TX control data DMA map, error = %d\n",
   5342 		    error);
   5343 		goto fail_2;
   5344 	}
   5345 
   5346 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5347 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5348 		aprint_error_dev(sc->sc_dev,
   5349 		    "unable to load TX control data DMA map, error = %d\n",
   5350 		    error);
   5351 		goto fail_3;
   5352 	}
   5353 
   5354 	return 0;
   5355 
   5356  fail_3:
   5357 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5358  fail_2:
   5359 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5360 	    WM_TXDESCS_SIZE(txq));
   5361  fail_1:
   5362 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5363  fail_0:
   5364 	return error;
   5365 }
   5366 
   5367 static void
   5368 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5369 {
   5370 
   5371 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5372 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5373 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5374 	    WM_TXDESCS_SIZE(txq));
   5375 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5376 }
   5377 
   5378 static int
   5379 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5380 {
   5381 	int error;
   5382 
   5383 	/*
   5384 	 * Allocate the control data structures, and create and load the
   5385 	 * DMA map for it.
   5386 	 *
   5387 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5388 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5389 	 * both sets within the same 4G segment.
   5390 	 */
   5391 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
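         	/*
         	 * As with the Tx ring (added note), the 4GB boundary argument
         	 * below keeps the Rx descriptor ring within a single 4G
         	 * segment.
         	 */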
   5392 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5393 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5394 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5395 		aprint_error_dev(sc->sc_dev,
   5396 		    "unable to allocate RX control data, error = %d\n",
   5397 		    error);
   5398 		goto fail_0;
   5399 	}
   5400 
   5401 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5402 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5403 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5404 		aprint_error_dev(sc->sc_dev,
   5405 		    "unable to map RX control data, error = %d\n", error);
   5406 		goto fail_1;
   5407 	}
   5408 
   5409 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5410 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5411 		aprint_error_dev(sc->sc_dev,
   5412 		    "unable to create RX control data DMA map, error = %d\n",
   5413 		    error);
   5414 		goto fail_2;
   5415 	}
   5416 
   5417 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5418 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5419 		aprint_error_dev(sc->sc_dev,
   5420 		    "unable to load RX control data DMA map, error = %d\n",
   5421 		    error);
   5422 		goto fail_3;
   5423 	}
   5424 
   5425 	return 0;
   5426 
   5427  fail_3:
   5428 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5429  fail_2:
   5430 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5431 	    rxq->rxq_desc_size);
   5432  fail_1:
   5433 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5434  fail_0:
   5435 	return error;
   5436 }
   5437 
   5438 static void
   5439 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5440 {
   5441 
   5442 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5443 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5444 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5445 	    rxq->rxq_desc_size);
   5446 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5447 }
   5448 
   5449 
   5450 static int
   5451 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5452 {
   5453 	int i, error;
   5454 
   5455 	/* Create the transmit buffer DMA maps. */
   5456 	WM_TXQUEUELEN(txq) =
   5457 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5458 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5459 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5460 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5461 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5462 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5463 			aprint_error_dev(sc->sc_dev,
   5464 			    "unable to create Tx DMA map %d, error = %d\n",
   5465 			    i, error);
   5466 			goto fail;
   5467 		}
   5468 	}
   5469 
   5470 	return 0;
   5471 
   5472  fail:
   5473 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5474 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5475 			bus_dmamap_destroy(sc->sc_dmat,
   5476 			    txq->txq_soft[i].txs_dmamap);
   5477 	}
   5478 	return error;
   5479 }
   5480 
   5481 static void
   5482 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5483 {
   5484 	int i;
   5485 
   5486 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5487 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5488 			bus_dmamap_destroy(sc->sc_dmat,
   5489 			    txq->txq_soft[i].txs_dmamap);
   5490 	}
   5491 }
   5492 
   5493 static int
   5494 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5495 {
   5496 	int i, error;
   5497 
   5498 	/* Create the receive buffer DMA maps. */
   5499 	for (i = 0; i < WM_NRXDESC; i++) {
   5500 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5501 			    MCLBYTES, 0, 0,
   5502 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5503 			aprint_error_dev(sc->sc_dev,
   5504 			    "unable to create Rx DMA map %d error = %d\n",
   5505 			    i, error);
   5506 			goto fail;
   5507 		}
   5508 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5509 	}
   5510 
   5511 	return 0;
   5512 
   5513  fail:
   5514 	for (i = 0; i < WM_NRXDESC; i++) {
   5515 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5516 			bus_dmamap_destroy(sc->sc_dmat,
   5517 			    rxq->rxq_soft[i].rxs_dmamap);
   5518 	}
   5519 	return error;
   5520 }
   5521 
   5522 static void
   5523 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5524 {
   5525 	int i;
   5526 
   5527 	for (i = 0; i < WM_NRXDESC; i++) {
   5528 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5529 			bus_dmamap_destroy(sc->sc_dmat,
   5530 			    rxq->rxq_soft[i].rxs_dmamap);
   5531 	}
   5532 }
   5533 
   5534 /*
    5535  * wm_alloc_txrx_queues:
    5536  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   5537  */
   5538 static int
   5539 wm_alloc_txrx_queues(struct wm_softc *sc)
   5540 {
   5541 	int i, error, tx_done, rx_done;
   5542 
   5543 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5544 	    KM_SLEEP);
   5545 	if (sc->sc_queue == NULL) {
   5546 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   5547 		error = ENOMEM;
   5548 		goto fail_0;
   5549 	}
   5550 
   5551 	/*
   5552 	 * For transmission
   5553 	 */
   5554 	error = 0;
   5555 	tx_done = 0;
   5556 	for (i = 0; i < sc->sc_nqueues; i++) {
   5557 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5558 		txq->txq_sc = sc;
   5559 #ifdef WM_MPSAFE
   5560 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5561 #else
   5562 		txq->txq_lock = NULL;
   5563 #endif
   5564 		error = wm_alloc_tx_descs(sc, txq);
   5565 		if (error)
   5566 			break;
   5567 		error = wm_alloc_tx_buffer(sc, txq);
   5568 		if (error) {
   5569 			wm_free_tx_descs(sc, txq);
   5570 			break;
   5571 		}
   5572 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5573 		if (txq->txq_interq == NULL) {
   5574 			wm_free_tx_descs(sc, txq);
   5575 			wm_free_tx_buffer(sc, txq);
   5576 			error = ENOMEM;
   5577 			break;
   5578 		}
   5579 		tx_done++;
   5580 	}
   5581 	if (error)
   5582 		goto fail_1;
   5583 
   5584 	/*
    5585 	 * For receive
   5586 	 */
   5587 	error = 0;
   5588 	rx_done = 0;
   5589 	for (i = 0; i < sc->sc_nqueues; i++) {
   5590 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5591 		rxq->rxq_sc = sc;
   5592 #ifdef WM_MPSAFE
   5593 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5594 #else
   5595 		rxq->rxq_lock = NULL;
   5596 #endif
   5597 		error = wm_alloc_rx_descs(sc, rxq);
   5598 		if (error)
   5599 			break;
   5600 
   5601 		error = wm_alloc_rx_buffer(sc, rxq);
   5602 		if (error) {
   5603 			wm_free_rx_descs(sc, rxq);
   5604 			break;
   5605 		}
   5606 
   5607 		rx_done++;
   5608 	}
   5609 	if (error)
   5610 		goto fail_2;
   5611 
   5612 	return 0;
   5613 
   5614  fail_2:
   5615 	for (i = 0; i < rx_done; i++) {
   5616 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5617 		wm_free_rx_buffer(sc, rxq);
   5618 		wm_free_rx_descs(sc, rxq);
   5619 		if (rxq->rxq_lock)
   5620 			mutex_obj_free(rxq->rxq_lock);
   5621 	}
   5622  fail_1:
   5623 	for (i = 0; i < tx_done; i++) {
   5624 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5625 		pcq_destroy(txq->txq_interq);
   5626 		wm_free_tx_buffer(sc, txq);
   5627 		wm_free_tx_descs(sc, txq);
   5628 		if (txq->txq_lock)
   5629 			mutex_obj_free(txq->txq_lock);
   5630 	}
   5631 
   5632 	kmem_free(sc->sc_queue,
   5633 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5634  fail_0:
   5635 	return error;
   5636 }
   5637 
   5638 /*
    5639  * wm_free_txrx_queues:
    5640  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   5641  */
   5642 static void
   5643 wm_free_txrx_queues(struct wm_softc *sc)
   5644 {
   5645 	int i;
   5646 
   5647 	for (i = 0; i < sc->sc_nqueues; i++) {
   5648 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5649 		wm_free_rx_buffer(sc, rxq);
   5650 		wm_free_rx_descs(sc, rxq);
   5651 		if (rxq->rxq_lock)
   5652 			mutex_obj_free(rxq->rxq_lock);
   5653 	}
   5654 
   5655 	for (i = 0; i < sc->sc_nqueues; i++) {
   5656 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5657 		wm_free_tx_buffer(sc, txq);
   5658 		wm_free_tx_descs(sc, txq);
   5659 		if (txq->txq_lock)
   5660 			mutex_obj_free(txq->txq_lock);
   5661 	}
   5662 
   5663 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5664 }
   5665 
   5666 static void
   5667 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5668 {
   5669 
   5670 	KASSERT(WM_TX_LOCKED(txq));
   5671 
   5672 	/* Initialize the transmit descriptor ring. */
   5673 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5674 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5675 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5676 	txq->txq_free = WM_NTXDESC(txq);
   5677 	txq->txq_next = 0;
   5678 }
   5679 
   5680 static void
   5681 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5682     struct wm_txqueue *txq)
   5683 {
   5684 
   5685 	KASSERT(WM_TX_LOCKED(txq));
   5686 
   5687 	if (sc->sc_type < WM_T_82543) {
   5688 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5689 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5690 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5691 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5692 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5693 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5694 	} else {
   5695 		int qid = wmq->wmq_id;
   5696 
   5697 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5698 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5699 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5700 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5701 
   5702 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5703 			/*
   5704 			 * Don't write TDT before TCTL.EN is set.
    5705 			 * See the documentation.
   5706 			 */
   5707 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5708 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5709 			    | TXDCTL_WTHRESH(0));
   5710 		else {
   5711 			/* ITR / 4 */
   5712 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5713 			if (sc->sc_type >= WM_T_82540) {
    5714 				/* should be the same value as TIDV */
   5715 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5716 			}
   5717 
   5718 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5719 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5720 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5721 		}
   5722 	}
   5723 }
   5724 
   5725 static void
   5726 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5727 {
   5728 	int i;
   5729 
   5730 	KASSERT(WM_TX_LOCKED(txq));
   5731 
   5732 	/* Initialize the transmit job descriptors. */
   5733 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5734 		txq->txq_soft[i].txs_mbuf = NULL;
   5735 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5736 	txq->txq_snext = 0;
   5737 	txq->txq_sdirty = 0;
   5738 }
   5739 
   5740 static void
   5741 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5742     struct wm_txqueue *txq)
   5743 {
   5744 
   5745 	KASSERT(WM_TX_LOCKED(txq));
   5746 
   5747 	/*
   5748 	 * Set up some register offsets that are different between
   5749 	 * the i82542 and the i82543 and later chips.
   5750 	 */
   5751 	if (sc->sc_type < WM_T_82543)
   5752 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5753 	else
   5754 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5755 
   5756 	wm_init_tx_descs(sc, txq);
   5757 	wm_init_tx_regs(sc, wmq, txq);
   5758 	wm_init_tx_buffer(sc, txq);
   5759 }
   5760 
   5761 static void
   5762 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5763     struct wm_rxqueue *rxq)
   5764 {
   5765 
   5766 	KASSERT(WM_RX_LOCKED(rxq));
   5767 
   5768 	/*
   5769 	 * Initialize the receive descriptor and receive job
   5770 	 * descriptor rings.
   5771 	 */
   5772 	if (sc->sc_type < WM_T_82543) {
   5773 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5774 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5775 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5776 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5777 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5778 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5779 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5780 
   5781 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5782 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5783 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5784 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5785 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5786 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5787 	} else {
   5788 		int qid = wmq->wmq_id;
   5789 
   5790 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5791 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5792 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5793 
   5794 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
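         			/*
         			 * Added note: SRRCTL takes the buffer size in
         			 * units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
         			 * so MCLBYTES must be a multiple of that unit;
         			 * otherwise we panic below.
         			 */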
   5795 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5796 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
   5797 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5798 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5799 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5800 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5801 			    | RXDCTL_WTHRESH(1));
   5802 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5803 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5804 		} else {
   5805 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5806 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5807 			/* ITR / 4 */
   5808 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    5809 			/* MUST be the same value as RDTR */
   5810 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5811 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5812 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5813 		}
   5814 	}
   5815 }
   5816 
   5817 static int
   5818 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5819 {
   5820 	struct wm_rxsoft *rxs;
   5821 	int error, i;
   5822 
   5823 	KASSERT(WM_RX_LOCKED(rxq));
   5824 
   5825 	for (i = 0; i < WM_NRXDESC; i++) {
   5826 		rxs = &rxq->rxq_soft[i];
   5827 		if (rxs->rxs_mbuf == NULL) {
   5828 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5829 				log(LOG_ERR, "%s: unable to allocate or map "
   5830 				    "rx buffer %d, error = %d\n",
   5831 				    device_xname(sc->sc_dev), i, error);
   5832 				/*
   5833 				 * XXX Should attempt to run with fewer receive
   5834 				 * XXX buffers instead of just failing.
   5835 				 */
   5836 				wm_rxdrain(rxq);
   5837 				return ENOMEM;
   5838 			}
   5839 		} else {
   5840 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5841 				wm_init_rxdesc(rxq, i);
   5842 			/*
    5843 			 * For 82575 and newer devices, the RX descriptors
    5844 			 * must be initialized after RCTL.EN is set in
    5845 			 * wm_set_filter().
   5846 			 */
   5847 		}
   5848 	}
   5849 	rxq->rxq_ptr = 0;
   5850 	rxq->rxq_discard = 0;
   5851 	WM_RXCHAIN_RESET(rxq);
   5852 
   5853 	return 0;
   5854 }
   5855 
   5856 static int
   5857 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5858     struct wm_rxqueue *rxq)
   5859 {
   5860 
   5861 	KASSERT(WM_RX_LOCKED(rxq));
   5862 
   5863 	/*
   5864 	 * Set up some register offsets that are different between
   5865 	 * the i82542 and the i82543 and later chips.
   5866 	 */
   5867 	if (sc->sc_type < WM_T_82543)
   5868 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5869 	else
   5870 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   5871 
   5872 	wm_init_rx_regs(sc, wmq, rxq);
   5873 	return wm_init_rx_buffer(sc, rxq);
   5874 }
   5875 
   5876 /*
    5877  * wm_init_txrx_queues:
    5878  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   5879  */
   5880 static int
   5881 wm_init_txrx_queues(struct wm_softc *sc)
   5882 {
   5883 	int i, error = 0;
   5884 
   5885 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5886 		device_xname(sc->sc_dev), __func__));
   5887 	for (i = 0; i < sc->sc_nqueues; i++) {
   5888 		struct wm_queue *wmq = &sc->sc_queue[i];
   5889 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5890 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5891 
   5892 		WM_TX_LOCK(txq);
   5893 		wm_init_tx_queue(sc, wmq, txq);
   5894 		WM_TX_UNLOCK(txq);
   5895 
   5896 		WM_RX_LOCK(rxq);
   5897 		error = wm_init_rx_queue(sc, wmq, rxq);
   5898 		WM_RX_UNLOCK(rxq);
   5899 		if (error)
   5900 			break;
   5901 	}
   5902 
   5903 	return error;
   5904 }
   5905 
   5906 /*
   5907  * wm_tx_offload:
   5908  *
   5909  *	Set up TCP/IP checksumming parameters for the
   5910  *	specified packet.
   5911  */
   5912 static int
   5913 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5914     uint8_t *fieldsp)
   5915 {
   5916 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5917 	struct mbuf *m0 = txs->txs_mbuf;
   5918 	struct livengood_tcpip_ctxdesc *t;
   5919 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5920 	uint32_t ipcse;
   5921 	struct ether_header *eh;
   5922 	int offset, iphl;
   5923 	uint8_t fields;
   5924 
   5925 	/*
   5926 	 * XXX It would be nice if the mbuf pkthdr had offset
   5927 	 * fields for the protocol headers.
   5928 	 */
   5929 
   5930 	eh = mtod(m0, struct ether_header *);
   5931 	switch (htons(eh->ether_type)) {
   5932 	case ETHERTYPE_IP:
   5933 	case ETHERTYPE_IPV6:
   5934 		offset = ETHER_HDR_LEN;
   5935 		break;
   5936 
   5937 	case ETHERTYPE_VLAN:
   5938 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5939 		break;
   5940 
   5941 	default:
   5942 		/*
   5943 		 * Don't support this protocol or encapsulation.
   5944 		 */
   5945 		*fieldsp = 0;
   5946 		*cmdp = 0;
   5947 		return 0;
   5948 	}
   5949 
   5950 	if ((m0->m_pkthdr.csum_flags &
   5951 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   5952 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5953 	} else {
   5954 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5955 	}
   5956 	ipcse = offset + iphl - 1;
   5957 
   5958 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   5959 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   5960 	seg = 0;
   5961 	fields = 0;
   5962 
   5963 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5964 		int hlen = offset + iphl;
   5965 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5966 
   5967 		if (__predict_false(m0->m_len <
   5968 				    (hlen + sizeof(struct tcphdr)))) {
   5969 			/*
   5970 			 * TCP/IP headers are not in the first mbuf; we need
   5971 			 * to do this the slow and painful way.  Let's just
   5972 			 * hope this doesn't happen very often.
   5973 			 */
   5974 			struct tcphdr th;
   5975 
   5976 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   5977 
   5978 			m_copydata(m0, hlen, sizeof(th), &th);
   5979 			if (v4) {
   5980 				struct ip ip;
   5981 
   5982 				m_copydata(m0, offset, sizeof(ip), &ip);
   5983 				ip.ip_len = 0;
   5984 				m_copyback(m0,
   5985 				    offset + offsetof(struct ip, ip_len),
   5986 				    sizeof(ip.ip_len), &ip.ip_len);
   5987 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5988 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5989 			} else {
   5990 				struct ip6_hdr ip6;
   5991 
   5992 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   5993 				ip6.ip6_plen = 0;
   5994 				m_copyback(m0,
   5995 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   5996 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   5997 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   5998 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   5999 			}
   6000 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6001 			    sizeof(th.th_sum), &th.th_sum);
   6002 
   6003 			hlen += th.th_off << 2;
   6004 		} else {
   6005 			/*
   6006 			 * TCP/IP headers are in the first mbuf; we can do
   6007 			 * this the easy way.
   6008 			 */
   6009 			struct tcphdr *th;
   6010 
   6011 			if (v4) {
   6012 				struct ip *ip =
   6013 				    (void *)(mtod(m0, char *) + offset);
   6014 				th = (void *)(mtod(m0, char *) + hlen);
   6015 
   6016 				ip->ip_len = 0;
   6017 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6018 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6019 			} else {
   6020 				struct ip6_hdr *ip6 =
   6021 				    (void *)(mtod(m0, char *) + offset);
   6022 				th = (void *)(mtod(m0, char *) + hlen);
   6023 
   6024 				ip6->ip6_plen = 0;
   6025 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6026 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6027 			}
   6028 			hlen += th->th_off << 2;
   6029 		}
   6030 
   6031 		if (v4) {
   6032 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6033 			cmdlen |= WTX_TCPIP_CMD_IP;
   6034 		} else {
   6035 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6036 			ipcse = 0;
   6037 		}
   6038 		cmd |= WTX_TCPIP_CMD_TSE;
   6039 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6040 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6041 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6042 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
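         		/*
         		 * Added note: at this point cmdlen carries the TSO
         		 * payload length (total length minus headers) and seg
         		 * carries the header length and MSS; the hardware uses
         		 * these to carve the payload into MSS-sized frames.
         		 */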
   6043 	}
   6044 
   6045 	/*
   6046 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6047 	 * offload feature, if we load the context descriptor, we
   6048 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6049 	 */
   6050 
   6051 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6052 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6053 	    WTX_TCPIP_IPCSE(ipcse);
   6054 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6055 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   6056 		fields |= WTX_IXSM;
   6057 	}
   6058 
   6059 	offset += iphl;
   6060 
   6061 	if (m0->m_pkthdr.csum_flags &
   6062 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6063 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6064 		fields |= WTX_TXSM;
   6065 		tucs = WTX_TCPIP_TUCSS(offset) |
   6066 		    WTX_TCPIP_TUCSO(offset +
   6067 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6068 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6069 	} else if ((m0->m_pkthdr.csum_flags &
   6070 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6071 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6072 		fields |= WTX_TXSM;
   6073 		tucs = WTX_TCPIP_TUCSS(offset) |
   6074 		    WTX_TCPIP_TUCSO(offset +
   6075 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6076 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6077 	} else {
   6078 		/* Just initialize it to a valid TCP context. */
   6079 		tucs = WTX_TCPIP_TUCSS(offset) |
   6080 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6081 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6082 	}
   6083 
   6084 	/* Fill in the context descriptor. */
   6085 	t = (struct livengood_tcpip_ctxdesc *)
   6086 	    &txq->txq_descs[txq->txq_next];
   6087 	t->tcpip_ipcs = htole32(ipcs);
   6088 	t->tcpip_tucs = htole32(tucs);
   6089 	t->tcpip_cmdlen = htole32(cmdlen);
   6090 	t->tcpip_seg = htole32(seg);
   6091 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6092 
   6093 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6094 	txs->txs_ndesc++;
   6095 
   6096 	*cmdp = cmd;
   6097 	*fieldsp = fields;
   6098 
   6099 	return 0;
   6100 }
   6101 
   6102 /*
   6103  * wm_start:		[ifnet interface function]
   6104  *
   6105  *	Start packet transmission on the interface.
   6106  */
   6107 static void
   6108 wm_start(struct ifnet *ifp)
   6109 {
   6110 	struct wm_softc *sc = ifp->if_softc;
   6111 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6112 
   6113 	WM_TX_LOCK(txq);
   6114 	if (!sc->sc_stopping)
   6115 		wm_start_locked(ifp);
   6116 	WM_TX_UNLOCK(txq);
   6117 }
   6118 
   6119 static void
   6120 wm_start_locked(struct ifnet *ifp)
   6121 {
   6122 	struct wm_softc *sc = ifp->if_softc;
   6123 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6124 	struct mbuf *m0;
   6125 	struct m_tag *mtag;
   6126 	struct wm_txsoft *txs;
   6127 	bus_dmamap_t dmamap;
   6128 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6129 	bus_addr_t curaddr;
   6130 	bus_size_t seglen, curlen;
   6131 	uint32_t cksumcmd;
   6132 	uint8_t cksumfields;
   6133 
   6134 	KASSERT(WM_TX_LOCKED(txq));
   6135 
   6136 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6137 		return;
   6138 
   6139 	/* Remember the previous number of free descriptors. */
   6140 	ofree = txq->txq_free;
   6141 
   6142 	/*
   6143 	 * Loop through the send queue, setting up transmit descriptors
   6144 	 * until we drain the queue, or use up all available transmit
   6145 	 * descriptors.
   6146 	 */
   6147 	for (;;) {
   6148 		m0 = NULL;
   6149 
   6150 		/* Get a work queue entry. */
   6151 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6152 			wm_txeof(sc, txq);
   6153 			if (txq->txq_sfree == 0) {
   6154 				DPRINTF(WM_DEBUG_TX,
   6155 				    ("%s: TX: no free job descriptors\n",
   6156 					device_xname(sc->sc_dev)));
   6157 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6158 				break;
   6159 			}
   6160 		}
   6161 
   6162 		/* Grab a packet off the queue. */
   6163 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6164 		if (m0 == NULL)
   6165 			break;
   6166 
   6167 		DPRINTF(WM_DEBUG_TX,
   6168 		    ("%s: TX: have packet to transmit: %p\n",
   6169 		    device_xname(sc->sc_dev), m0));
   6170 
   6171 		txs = &txq->txq_soft[txq->txq_snext];
   6172 		dmamap = txs->txs_dmamap;
   6173 
   6174 		use_tso = (m0->m_pkthdr.csum_flags &
   6175 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6176 
   6177 		/*
   6178 		 * So says the Linux driver:
   6179 		 * The controller does a simple calculation to make sure
   6180 		 * there is enough room in the FIFO before initiating the
   6181 		 * DMA for each buffer.  The calc is:
   6182 		 *	4 = ceil(buffer len / MSS)
   6183 		 * To make sure we don't overrun the FIFO, adjust the max
   6184 		 * buffer len if the MSS drops.
   6185 		 */
   6186 		dmamap->dm_maxsegsz =
   6187 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6188 		    ? m0->m_pkthdr.segsz << 2
   6189 		    : WTX_MAX_LEN;
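         		/*
         		 * Illustrative numbers (added): with a TSO MSS of 1448,
         		 * each DMA segment is capped at 1448 << 2 = 5792 bytes;
         		 * without TSO the cap is WTX_MAX_LEN.
         		 */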
   6190 
   6191 		/*
   6192 		 * Load the DMA map.  If this fails, the packet either
   6193 		 * didn't fit in the allotted number of segments, or we
   6194 		 * were short on resources.  For the too-many-segments
   6195 		 * case, we simply report an error and drop the packet,
   6196 		 * since we can't sanely copy a jumbo packet to a single
   6197 		 * buffer.
   6198 		 */
   6199 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6200 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6201 		if (error) {
   6202 			if (error == EFBIG) {
   6203 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6204 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6205 				    "DMA segments, dropping...\n",
   6206 				    device_xname(sc->sc_dev));
   6207 				wm_dump_mbuf_chain(sc, m0);
   6208 				m_freem(m0);
   6209 				continue;
   6210 			}
   6211 			/*  Short on resources, just stop for now. */
   6212 			DPRINTF(WM_DEBUG_TX,
   6213 			    ("%s: TX: dmamap load failed: %d\n",
   6214 			    device_xname(sc->sc_dev), error));
   6215 			break;
   6216 		}
   6217 
   6218 		segs_needed = dmamap->dm_nsegs;
   6219 		if (use_tso) {
   6220 			/* For sentinel descriptor; see below. */
   6221 			segs_needed++;
   6222 		}
   6223 
   6224 		/*
   6225 		 * Ensure we have enough descriptors free to describe
   6226 		 * the packet.  Note, we always reserve one descriptor
   6227 		 * at the end of the ring due to the semantics of the
   6228 		 * TDT register, plus one more in the event we need
   6229 		 * to load offload context.
   6230 		 */
   6231 		if (segs_needed > txq->txq_free - 2) {
   6232 			/*
   6233 			 * Not enough free descriptors to transmit this
   6234 			 * packet.  We haven't committed anything yet,
   6235 			 * so just unload the DMA map, put the packet
   6236 			 * pack on the queue, and punt.  Notify the upper
    6237 			 * back on the queue, and punt.  Notify the upper
   6238 			 */
   6239 			DPRINTF(WM_DEBUG_TX,
   6240 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6241 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6242 			    segs_needed, txq->txq_free - 1));
   6243 			ifp->if_flags |= IFF_OACTIVE;
   6244 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6245 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6246 			break;
   6247 		}
   6248 
   6249 		/*
   6250 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6251 		 * once we know we can transmit the packet, since we
   6252 		 * do some internal FIFO space accounting here.
   6253 		 */
   6254 		if (sc->sc_type == WM_T_82547 &&
   6255 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6256 			DPRINTF(WM_DEBUG_TX,
   6257 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6258 			    device_xname(sc->sc_dev)));
   6259 			ifp->if_flags |= IFF_OACTIVE;
   6260 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6261 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   6262 			break;
   6263 		}
   6264 
   6265 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6266 
   6267 		DPRINTF(WM_DEBUG_TX,
   6268 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6269 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6270 
   6271 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6272 
   6273 		/*
   6274 		 * Store a pointer to the packet so that we can free it
   6275 		 * later.
   6276 		 *
   6277 		 * Initially, we consider the number of descriptors the
   6278 		 * packet uses the number of DMA segments.  This may be
   6279 		 * incremented by 1 if we do checksum offload (a descriptor
   6280 		 * is used to set the checksum context).
   6281 		 */
   6282 		txs->txs_mbuf = m0;
   6283 		txs->txs_firstdesc = txq->txq_next;
   6284 		txs->txs_ndesc = segs_needed;
   6285 
   6286 		/* Set up offload parameters for this packet. */
   6287 		if (m0->m_pkthdr.csum_flags &
   6288 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6289 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6290 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6291 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6292 					  &cksumfields) != 0) {
   6293 				/* Error message already displayed. */
   6294 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6295 				continue;
   6296 			}
   6297 		} else {
   6298 			cksumcmd = 0;
   6299 			cksumfields = 0;
   6300 		}
   6301 
   6302 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6303 
   6304 		/* Sync the DMA map. */
   6305 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6306 		    BUS_DMASYNC_PREWRITE);
   6307 
   6308 		/* Initialize the transmit descriptor. */
   6309 		for (nexttx = txq->txq_next, seg = 0;
   6310 		     seg < dmamap->dm_nsegs; seg++) {
   6311 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6312 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6313 			     seglen != 0;
   6314 			     curaddr += curlen, seglen -= curlen,
   6315 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6316 				curlen = seglen;
   6317 
   6318 				/*
   6319 				 * So says the Linux driver:
   6320 				 * Work around for premature descriptor
   6321 				 * write-backs in TSO mode.  Append a
   6322 				 * 4-byte sentinel descriptor.
   6323 				 */
   6324 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6325 				    curlen > 8)
   6326 					curlen -= 4;
   6327 
   6328 				wm_set_dma_addr(
   6329 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6330 				txq->txq_descs[nexttx].wtx_cmdlen
   6331 				    = htole32(cksumcmd | curlen);
   6332 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6333 				    = 0;
   6334 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6335 				    = cksumfields;
    6336 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6337 				lasttx = nexttx;
   6338 
   6339 				DPRINTF(WM_DEBUG_TX,
   6340 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6341 				     "len %#04zx\n",
   6342 				    device_xname(sc->sc_dev), nexttx,
   6343 				    (uint64_t)curaddr, curlen));
   6344 			}
   6345 		}
   6346 
   6347 		KASSERT(lasttx != -1);
   6348 
   6349 		/*
   6350 		 * Set up the command byte on the last descriptor of
   6351 		 * the packet.  If we're in the interrupt delay window,
   6352 		 * delay the interrupt.
   6353 		 */
   6354 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6355 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6356 
   6357 		/*
   6358 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6359 		 * up the descriptor to encapsulate the packet for us.
   6360 		 *
   6361 		 * This is only valid on the last descriptor of the packet.
   6362 		 */
   6363 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6364 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6365 			    htole32(WTX_CMD_VLE);
   6366 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6367 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6368 		}
   6369 
   6370 		txs->txs_lastdesc = lasttx;
   6371 
   6372 		DPRINTF(WM_DEBUG_TX,
   6373 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6374 		    device_xname(sc->sc_dev),
   6375 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6376 
   6377 		/* Sync the descriptors we're using. */
   6378 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6379 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6380 
   6381 		/* Give the packet to the chip. */
   6382 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6383 
   6384 		DPRINTF(WM_DEBUG_TX,
   6385 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6386 
   6387 		DPRINTF(WM_DEBUG_TX,
   6388 		    ("%s: TX: finished transmitting packet, job %d\n",
   6389 		    device_xname(sc->sc_dev), txq->txq_snext));
   6390 
   6391 		/* Advance the tx pointer. */
   6392 		txq->txq_free -= txs->txs_ndesc;
   6393 		txq->txq_next = nexttx;
   6394 
   6395 		txq->txq_sfree--;
   6396 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6397 
   6398 		/* Pass the packet to any BPF listeners. */
   6399 		bpf_mtap(ifp, m0);
   6400 	}
   6401 
   6402 	if (m0 != NULL) {
   6403 		ifp->if_flags |= IFF_OACTIVE;
   6404 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6405 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6406 			__func__));
   6407 		m_freem(m0);
   6408 	}
   6409 
   6410 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6411 		/* No more slots; notify upper layer. */
   6412 		ifp->if_flags |= IFF_OACTIVE;
   6413 	}
   6414 
   6415 	if (txq->txq_free != ofree) {
   6416 		/* Set a watchdog timer in case the chip flakes out. */
   6417 		ifp->if_timer = 5;
   6418 	}
   6419 }
   6420 
   6421 /*
   6422  * wm_nq_tx_offload:
   6423  *
   6424  *	Set up TCP/IP checksumming parameters for the
   6425  *	specified packet, for NEWQUEUE devices
   6426  */
   6427 static int
   6428 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6429     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6430 {
   6431 	struct mbuf *m0 = txs->txs_mbuf;
   6432 	struct m_tag *mtag;
   6433 	uint32_t vl_len, mssidx, cmdc;
   6434 	struct ether_header *eh;
   6435 	int offset, iphl;
   6436 
   6437 	/*
   6438 	 * XXX It would be nice if the mbuf pkthdr had offset
   6439 	 * fields for the protocol headers.
   6440 	 */
   6441 	*cmdlenp = 0;
   6442 	*fieldsp = 0;
   6443 
   6444 	eh = mtod(m0, struct ether_header *);
   6445 	switch (htons(eh->ether_type)) {
   6446 	case ETHERTYPE_IP:
   6447 	case ETHERTYPE_IPV6:
   6448 		offset = ETHER_HDR_LEN;
   6449 		break;
   6450 
   6451 	case ETHERTYPE_VLAN:
   6452 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6453 		break;
   6454 
   6455 	default:
   6456 		/* Don't support this protocol or encapsulation. */
   6457 		*do_csum = false;
   6458 		return 0;
   6459 	}
   6460 	*do_csum = true;
   6461 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6462 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6463 
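         	/*
         	 * Assemble the context descriptor's VLAN/length word: the MAC
         	 * header length, the IP header length and any VLAN tag are all
         	 * packed into vl_len.
         	 */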
   6464 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6465 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6466 
   6467 	if ((m0->m_pkthdr.csum_flags &
   6468 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6469 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6470 	} else {
   6471 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6472 	}
   6473 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6474 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6475 
   6476 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6477 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6478 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6479 		*cmdlenp |= NQTX_CMD_VLE;
   6480 	}
   6481 
   6482 	mssidx = 0;
   6483 
   6484 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6485 		int hlen = offset + iphl;
   6486 		int tcp_hlen;
   6487 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6488 
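         		/*
         		 * For TSO the hardware rewrites the IP length and TCP
         		 * checksum on every segment, so zero the length fields
         		 * and seed th_sum with a pseudo-header checksum that
         		 * omits the payload length.
         		 */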
   6489 		if (__predict_false(m0->m_len <
   6490 				    (hlen + sizeof(struct tcphdr)))) {
   6491 			/*
   6492 			 * TCP/IP headers are not in the first mbuf; we need
   6493 			 * to do this the slow and painful way.  Let's just
   6494 			 * hope this doesn't happen very often.
   6495 			 */
   6496 			struct tcphdr th;
   6497 
   6498 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6499 
   6500 			m_copydata(m0, hlen, sizeof(th), &th);
   6501 			if (v4) {
   6502 				struct ip ip;
   6503 
   6504 				m_copydata(m0, offset, sizeof(ip), &ip);
   6505 				ip.ip_len = 0;
   6506 				m_copyback(m0,
   6507 				    offset + offsetof(struct ip, ip_len),
   6508 				    sizeof(ip.ip_len), &ip.ip_len);
   6509 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6510 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6511 			} else {
   6512 				struct ip6_hdr ip6;
   6513 
   6514 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6515 				ip6.ip6_plen = 0;
   6516 				m_copyback(m0,
   6517 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6518 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6519 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6520 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6521 			}
   6522 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6523 			    sizeof(th.th_sum), &th.th_sum);
   6524 
   6525 			tcp_hlen = th.th_off << 2;
   6526 		} else {
   6527 			/*
   6528 			 * TCP/IP headers are in the first mbuf; we can do
   6529 			 * this the easy way.
   6530 			 */
   6531 			struct tcphdr *th;
   6532 
   6533 			if (v4) {
   6534 				struct ip *ip =
   6535 				    (void *)(mtod(m0, char *) + offset);
   6536 				th = (void *)(mtod(m0, char *) + hlen);
   6537 
   6538 				ip->ip_len = 0;
   6539 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6540 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6541 			} else {
   6542 				struct ip6_hdr *ip6 =
   6543 				    (void *)(mtod(m0, char *) + offset);
   6544 				th = (void *)(mtod(m0, char *) + hlen);
   6545 
   6546 				ip6->ip6_plen = 0;
   6547 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6548 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6549 			}
   6550 			tcp_hlen = th->th_off << 2;
   6551 		}
   6552 		hlen += tcp_hlen;
   6553 		*cmdlenp |= NQTX_CMD_TSE;
   6554 
   6555 		if (v4) {
   6556 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6557 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6558 		} else {
   6559 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6560 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6561 		}
   6562 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6563 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6564 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6565 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6566 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6567 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6568 	} else {
   6569 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6570 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6571 	}
   6572 
   6573 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6574 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6575 		cmdc |= NQTXC_CMD_IP4;
   6576 	}
   6577 
   6578 	if (m0->m_pkthdr.csum_flags &
   6579 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6580 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6581 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6582 			cmdc |= NQTXC_CMD_TCP;
   6583 		} else {
   6584 			cmdc |= NQTXC_CMD_UDP;
   6585 		}
   6586 		cmdc |= NQTXC_CMD_IP4;
   6587 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6588 	}
   6589 	if (m0->m_pkthdr.csum_flags &
   6590 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6591 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6592 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6593 			cmdc |= NQTXC_CMD_TCP;
   6594 		} else {
   6595 			cmdc |= NQTXC_CMD_UDP;
   6596 		}
   6597 		cmdc |= NQTXC_CMD_IP6;
   6598 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6599 	}
   6600 
   6601 	/* Fill in the context descriptor. */
   6602 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6603 	    htole32(vl_len);
   6604 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6605 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6606 	    htole32(cmdc);
   6607 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6608 	    htole32(mssidx);
   6609 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6610 	DPRINTF(WM_DEBUG_TX,
   6611 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6612 	    txq->txq_next, 0, vl_len));
   6613 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6614 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6615 	txs->txs_ndesc++;
   6616 	return 0;
   6617 }
   6618 
   6619 /*
   6620  * wm_nq_start:		[ifnet interface function]
   6621  *
   6622  *	Start packet transmission on the interface for NEWQUEUE devices
   6623  */
   6624 static void
   6625 wm_nq_start(struct ifnet *ifp)
   6626 {
   6627 	struct wm_softc *sc = ifp->if_softc;
   6628 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6629 
   6630 	WM_TX_LOCK(txq);
   6631 	if (!sc->sc_stopping)
   6632 		wm_nq_start_locked(ifp);
   6633 	WM_TX_UNLOCK(txq);
   6634 }
   6635 
   6636 static void
   6637 wm_nq_start_locked(struct ifnet *ifp)
   6638 {
   6639 	struct wm_softc *sc = ifp->if_softc;
   6640 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6641 
   6642 	wm_nq_send_common_locked(ifp, txq, false);
   6643 }
   6644 
   6645 static inline int
   6646 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6647 {
   6648 	struct wm_softc *sc = ifp->if_softc;
   6649 	u_int cpuid = cpu_index(curcpu());
   6650 
   6651 	/*
    6652 	 * Currently, a simple distribution strategy.
    6653 	 * TODO:
    6654 	 * distribute by flowid (RSS hash value).
   6655 	 */
   6656 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6657 }
   6658 
   6659 static int
   6660 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6661 {
   6662 	int qid;
   6663 	struct wm_softc *sc = ifp->if_softc;
   6664 	struct wm_txqueue *txq;
   6665 
   6666 	qid = wm_nq_select_txqueue(ifp, m);
   6667 	txq = &sc->sc_queue[qid].wmq_txq;
   6668 
   6669 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6670 		m_freem(m);
   6671 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6672 		return ENOBUFS;
   6673 	}
   6674 
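         	/*
         	 * If the Tx lock is contended, the packet stays on txq_interq
         	 * and will be drained from the Tx completion interrupt path.
         	 */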
   6675 	if (WM_TX_TRYLOCK(txq)) {
   6676 		/* XXXX should be per TX queue */
   6677 		ifp->if_obytes += m->m_pkthdr.len;
   6678 		if (m->m_flags & M_MCAST)
   6679 			ifp->if_omcasts++;
   6680 
   6681 		if (!sc->sc_stopping)
   6682 			wm_nq_transmit_locked(ifp, txq);
   6683 		WM_TX_UNLOCK(txq);
   6684 	}
   6685 
   6686 	return 0;
   6687 }
   6688 
   6689 static void
   6690 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6691 {
   6692 
   6693 	wm_nq_send_common_locked(ifp, txq, true);
   6694 }
   6695 
   6696 static void
   6697 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6698     bool is_transmit)
   6699 {
   6700 	struct wm_softc *sc = ifp->if_softc;
   6701 	struct mbuf *m0;
   6702 	struct m_tag *mtag;
   6703 	struct wm_txsoft *txs;
   6704 	bus_dmamap_t dmamap;
   6705 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6706 	bool do_csum, sent;
   6707 
   6708 	KASSERT(WM_TX_LOCKED(txq));
   6709 
   6710 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6711 		return;
   6712 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6713 		return;
   6714 
   6715 	sent = false;
   6716 
   6717 	/*
   6718 	 * Loop through the send queue, setting up transmit descriptors
   6719 	 * until we drain the queue, or use up all available transmit
   6720 	 * descriptors.
   6721 	 */
   6722 	for (;;) {
   6723 		m0 = NULL;
   6724 
   6725 		/* Get a work queue entry. */
   6726 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6727 			wm_txeof(sc, txq);
   6728 			if (txq->txq_sfree == 0) {
   6729 				DPRINTF(WM_DEBUG_TX,
   6730 				    ("%s: TX: no free job descriptors\n",
   6731 					device_xname(sc->sc_dev)));
   6732 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6733 				break;
   6734 			}
   6735 		}
   6736 
   6737 		/* Grab a packet off the queue. */
   6738 		if (is_transmit)
   6739 			m0 = pcq_get(txq->txq_interq);
   6740 		else
   6741 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6742 		if (m0 == NULL)
   6743 			break;
   6744 
   6745 		DPRINTF(WM_DEBUG_TX,
   6746 		    ("%s: TX: have packet to transmit: %p\n",
   6747 		    device_xname(sc->sc_dev), m0));
   6748 
   6749 		txs = &txq->txq_soft[txq->txq_snext];
   6750 		dmamap = txs->txs_dmamap;
   6751 
   6752 		/*
   6753 		 * Load the DMA map.  If this fails, the packet either
   6754 		 * didn't fit in the allotted number of segments, or we
   6755 		 * were short on resources.  For the too-many-segments
   6756 		 * case, we simply report an error and drop the packet,
   6757 		 * since we can't sanely copy a jumbo packet to a single
   6758 		 * buffer.
   6759 		 */
   6760 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6761 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6762 		if (error) {
   6763 			if (error == EFBIG) {
   6764 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6765 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6766 				    "DMA segments, dropping...\n",
   6767 				    device_xname(sc->sc_dev));
   6768 				wm_dump_mbuf_chain(sc, m0);
   6769 				m_freem(m0);
   6770 				continue;
   6771 			}
   6772 			/* Short on resources, just stop for now. */
   6773 			DPRINTF(WM_DEBUG_TX,
   6774 			    ("%s: TX: dmamap load failed: %d\n",
   6775 			    device_xname(sc->sc_dev), error));
   6776 			break;
   6777 		}
   6778 
   6779 		segs_needed = dmamap->dm_nsegs;
   6780 
   6781 		/*
   6782 		 * Ensure we have enough descriptors free to describe
   6783 		 * the packet.  Note, we always reserve one descriptor
   6784 		 * at the end of the ring due to the semantics of the
   6785 		 * TDT register, plus one more in the event we need
   6786 		 * to load offload context.
   6787 		 */
   6788 		if (segs_needed > txq->txq_free - 2) {
   6789 			/*
   6790 			 * Not enough free descriptors to transmit this
   6791 			 * packet.  We haven't committed anything yet,
   6792 			 * so just unload the DMA map, put the packet
    6793 			 * back on the queue, and punt.  Notify the upper
   6794 			 * layer that there are no more slots left.
   6795 			 */
   6796 			DPRINTF(WM_DEBUG_TX,
   6797 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6798 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6799 			    segs_needed, txq->txq_free - 1));
   6800 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6801 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6802 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6803 			break;
   6804 		}
   6805 
   6806 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6807 
   6808 		DPRINTF(WM_DEBUG_TX,
   6809 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6810 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6811 
   6812 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6813 
   6814 		/*
   6815 		 * Store a pointer to the packet so that we can free it
   6816 		 * later.
   6817 		 *
   6818 		 * Initially, we consider the number of descriptors the
   6819 		 * packet uses the number of DMA segments.  This may be
   6820 		 * incremented by 1 if we do checksum offload (a descriptor
   6821 		 * is used to set the checksum context).
   6822 		 */
   6823 		txs->txs_mbuf = m0;
   6824 		txs->txs_firstdesc = txq->txq_next;
   6825 		txs->txs_ndesc = segs_needed;
   6826 
   6827 		/* Set up offload parameters for this packet. */
   6828 		uint32_t cmdlen, fields, dcmdlen;
   6829 		if (m0->m_pkthdr.csum_flags &
   6830 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6831 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6832 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6833 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   6834 			    &do_csum) != 0) {
   6835 				/* Error message already displayed. */
   6836 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6837 				continue;
   6838 			}
   6839 		} else {
   6840 			do_csum = false;
   6841 			cmdlen = 0;
   6842 			fields = 0;
   6843 		}
   6844 
   6845 		/* Sync the DMA map. */
   6846 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6847 		    BUS_DMASYNC_PREWRITE);
   6848 
   6849 		/* Initialize the first transmit descriptor. */
   6850 		nexttx = txq->txq_next;
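         		/*
         		 * Without offload a legacy descriptor suffices; with
         		 * offload the advanced (DEXT) format is used so that
         		 * the context descriptor set up above is applied.
         		 */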
   6851 		if (!do_csum) {
   6852 			/* setup a legacy descriptor */
   6853 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   6854 			    dmamap->dm_segs[0].ds_addr);
   6855 			txq->txq_descs[nexttx].wtx_cmdlen =
   6856 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6857 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6858 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6859 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6860 			    NULL) {
   6861 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6862 				    htole32(WTX_CMD_VLE);
   6863 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6864 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6865 			} else {
    6866 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6867 			}
   6868 			dcmdlen = 0;
   6869 		} else {
   6870 			/* setup an advanced data descriptor */
   6871 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6872 			    htole64(dmamap->dm_segs[0].ds_addr);
   6873 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6874 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6875 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6876 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6877 			    htole32(fields);
   6878 			DPRINTF(WM_DEBUG_TX,
   6879 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6880 			    device_xname(sc->sc_dev), nexttx,
   6881 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6882 			DPRINTF(WM_DEBUG_TX,
   6883 			    ("\t 0x%08x%08x\n", fields,
   6884 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6885 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6886 		}
   6887 
   6888 		lasttx = nexttx;
   6889 		nexttx = WM_NEXTTX(txq, nexttx);
   6890 		/*
    6891 		 * Fill in the next descriptors.  The legacy and advanced
    6892 		 * formats are the same here.
   6893 		 */
   6894 		for (seg = 1; seg < dmamap->dm_nsegs;
   6895 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6896 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6897 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6898 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6899 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6900 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6901 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6902 			lasttx = nexttx;
   6903 
   6904 			DPRINTF(WM_DEBUG_TX,
   6905 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6906 			     "len %#04zx\n",
   6907 			    device_xname(sc->sc_dev), nexttx,
   6908 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6909 			    dmamap->dm_segs[seg].ds_len));
   6910 		}
   6911 
   6912 		KASSERT(lasttx != -1);
   6913 
   6914 		/*
   6915 		 * Set up the command byte on the last descriptor of
   6916 		 * the packet.  If we're in the interrupt delay window,
   6917 		 * delay the interrupt.
   6918 		 */
   6919 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6920 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6921 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6922 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6923 
   6924 		txs->txs_lastdesc = lasttx;
   6925 
   6926 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6927 		    device_xname(sc->sc_dev),
   6928 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6929 
   6930 		/* Sync the descriptors we're using. */
   6931 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6932 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6933 
   6934 		/* Give the packet to the chip. */
   6935 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6936 		sent = true;
   6937 
   6938 		DPRINTF(WM_DEBUG_TX,
   6939 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6940 
   6941 		DPRINTF(WM_DEBUG_TX,
   6942 		    ("%s: TX: finished transmitting packet, job %d\n",
   6943 		    device_xname(sc->sc_dev), txq->txq_snext));
   6944 
   6945 		/* Advance the tx pointer. */
   6946 		txq->txq_free -= txs->txs_ndesc;
   6947 		txq->txq_next = nexttx;
   6948 
   6949 		txq->txq_sfree--;
   6950 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6951 
   6952 		/* Pass the packet to any BPF listeners. */
   6953 		bpf_mtap(ifp, m0);
   6954 	}
   6955 
   6956 	if (m0 != NULL) {
   6957 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6958 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6959 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6960 			__func__));
   6961 		m_freem(m0);
   6962 	}
   6963 
   6964 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6965 		/* No more slots; notify upper layer. */
   6966 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6967 	}
   6968 
   6969 	if (sent) {
   6970 		/* Set a watchdog timer in case the chip flakes out. */
   6971 		ifp->if_timer = 5;
   6972 	}
   6973 }
   6974 
   6975 /* Interrupt */
   6976 
   6977 /*
   6978  * wm_txeof:
   6979  *
   6980  *	Helper; handle transmit interrupts.
   6981  */
   6982 static int
   6983 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   6984 {
   6985 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6986 	struct wm_txsoft *txs;
   6987 	bool processed = false;
   6988 	int count = 0;
   6989 	int i;
   6990 	uint8_t status;
   6991 
   6992 	KASSERT(WM_TX_LOCKED(txq));
   6993 
   6994 	if (sc->sc_stopping)
   6995 		return 0;
   6996 
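         	/* Reclaiming descriptors below may make space available again. */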
   6997 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   6998 
   6999 	/*
   7000 	 * Go through the Tx list and free mbufs for those
   7001 	 * frames which have been transmitted.
   7002 	 */
   7003 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7004 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7005 		txs = &txq->txq_soft[i];
   7006 
   7007 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7008 			device_xname(sc->sc_dev), i));
   7009 
   7010 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7011 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7012 
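         		/*
         		 * The hardware sets the descriptor-done (DD) bit in the
         		 * last descriptor of a job once it has been transmitted.
         		 */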
   7013 		status =
   7014 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7015 		if ((status & WTX_ST_DD) == 0) {
   7016 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7017 			    BUS_DMASYNC_PREREAD);
   7018 			break;
   7019 		}
   7020 
   7021 		processed = true;
   7022 		count++;
   7023 		DPRINTF(WM_DEBUG_TX,
   7024 		    ("%s: TX: job %d done: descs %d..%d\n",
   7025 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7026 		    txs->txs_lastdesc));
   7027 
   7028 		/*
   7029 		 * XXX We should probably be using the statistics
   7030 		 * XXX registers, but I don't know if they exist
   7031 		 * XXX on chips before the i82544.
   7032 		 */
   7033 
   7034 #ifdef WM_EVENT_COUNTERS
   7035 		if (status & WTX_ST_TU)
   7036 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   7037 #endif /* WM_EVENT_COUNTERS */
   7038 
   7039 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7040 			ifp->if_oerrors++;
   7041 			if (status & WTX_ST_LC)
   7042 				log(LOG_WARNING, "%s: late collision\n",
   7043 				    device_xname(sc->sc_dev));
   7044 			else if (status & WTX_ST_EC) {
   7045 				ifp->if_collisions += 16;
   7046 				log(LOG_WARNING, "%s: excessive collisions\n",
   7047 				    device_xname(sc->sc_dev));
   7048 			}
   7049 		} else
   7050 			ifp->if_opackets++;
   7051 
   7052 		txq->txq_free += txs->txs_ndesc;
   7053 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7054 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7055 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7056 		m_freem(txs->txs_mbuf);
   7057 		txs->txs_mbuf = NULL;
   7058 	}
   7059 
   7060 	/* Update the dirty transmit buffer pointer. */
   7061 	txq->txq_sdirty = i;
   7062 	DPRINTF(WM_DEBUG_TX,
   7063 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7064 
   7065 	if (count != 0)
   7066 		rnd_add_uint32(&sc->rnd_source, count);
   7067 
   7068 	/*
   7069 	 * If there are no more pending transmissions, cancel the watchdog
   7070 	 * timer.
   7071 	 */
   7072 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7073 		ifp->if_timer = 0;
   7074 
   7075 	return processed;
   7076 }
   7077 
   7078 /*
   7079  * wm_rxeof:
   7080  *
   7081  *	Helper; handle receive interrupts.
   7082  */
   7083 static void
   7084 wm_rxeof(struct wm_rxqueue *rxq)
   7085 {
   7086 	struct wm_softc *sc = rxq->rxq_sc;
   7087 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7088 	struct wm_rxsoft *rxs;
   7089 	struct mbuf *m;
   7090 	int i, len;
   7091 	int count = 0;
   7092 	uint8_t status, errors;
   7093 	uint16_t vlantag;
   7094 
   7095 	KASSERT(WM_RX_LOCKED(rxq));
   7096 
   7097 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7098 		rxs = &rxq->rxq_soft[i];
   7099 
   7100 		DPRINTF(WM_DEBUG_RX,
   7101 		    ("%s: RX: checking descriptor %d\n",
   7102 		    device_xname(sc->sc_dev), i));
   7103 
    7104 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7105 
   7106 		status = rxq->rxq_descs[i].wrx_status;
   7107 		errors = rxq->rxq_descs[i].wrx_errors;
   7108 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7109 		vlantag = rxq->rxq_descs[i].wrx_special;
   7110 
   7111 		if ((status & WRX_ST_DD) == 0) {
   7112 			/* We have processed all of the receive descriptors. */
   7113 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7114 			break;
   7115 		}
   7116 
   7117 		count++;
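         		/*
         		 * After a failed buffer allocation we keep discarding
         		 * descriptors until the end of the packet (EOP) is seen.
         		 */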
   7118 		if (__predict_false(rxq->rxq_discard)) {
   7119 			DPRINTF(WM_DEBUG_RX,
   7120 			    ("%s: RX: discarding contents of descriptor %d\n",
   7121 			    device_xname(sc->sc_dev), i));
   7122 			wm_init_rxdesc(rxq, i);
   7123 			if (status & WRX_ST_EOP) {
   7124 				/* Reset our state. */
   7125 				DPRINTF(WM_DEBUG_RX,
   7126 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7127 				    device_xname(sc->sc_dev)));
   7128 				rxq->rxq_discard = 0;
   7129 			}
   7130 			continue;
   7131 		}
   7132 
   7133 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7134 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7135 
   7136 		m = rxs->rxs_mbuf;
   7137 
   7138 		/*
   7139 		 * Add a new receive buffer to the ring, unless of
   7140 		 * course the length is zero. Treat the latter as a
   7141 		 * failed mapping.
   7142 		 */
   7143 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7144 			/*
   7145 			 * Failed, throw away what we've done so
   7146 			 * far, and discard the rest of the packet.
   7147 			 */
   7148 			ifp->if_ierrors++;
   7149 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7150 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7151 			wm_init_rxdesc(rxq, i);
   7152 			if ((status & WRX_ST_EOP) == 0)
   7153 				rxq->rxq_discard = 1;
   7154 			if (rxq->rxq_head != NULL)
   7155 				m_freem(rxq->rxq_head);
   7156 			WM_RXCHAIN_RESET(rxq);
   7157 			DPRINTF(WM_DEBUG_RX,
   7158 			    ("%s: RX: Rx buffer allocation failed, "
   7159 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7160 			    rxq->rxq_discard ? " (discard)" : ""));
   7161 			continue;
   7162 		}
   7163 
   7164 		m->m_len = len;
   7165 		rxq->rxq_len += len;
   7166 		DPRINTF(WM_DEBUG_RX,
   7167 		    ("%s: RX: buffer at %p len %d\n",
   7168 		    device_xname(sc->sc_dev), m->m_data, len));
   7169 
   7170 		/* If this is not the end of the packet, keep looking. */
   7171 		if ((status & WRX_ST_EOP) == 0) {
   7172 			WM_RXCHAIN_LINK(rxq, m);
   7173 			DPRINTF(WM_DEBUG_RX,
   7174 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7175 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7176 			continue;
   7177 		}
   7178 
   7179 		/*
    7180 		 * Okay, we have the entire packet now.  The chip is
    7181 		 * configured to include the FCS except on the I350, I354
    7182 		 * and I21[01] (not all chips can be configured to strip
    7183 		 * it), so we need to trim it.  We may need to adjust the
    7184 		 * length of the previous mbuf in the chain if the current
    7185 		 * mbuf is too short.  Due to an erratum, the RCTL_SECRC
    7186 		 * bit in the RCTL register is always set on the I350, so
    7187 		 * the FCS is already stripped there and we don't trim it.
   7188 		 */
   7189 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7190 		    && (sc->sc_type != WM_T_I210)
   7191 		    && (sc->sc_type != WM_T_I211)) {
   7192 			if (m->m_len < ETHER_CRC_LEN) {
   7193 				rxq->rxq_tail->m_len
   7194 				    -= (ETHER_CRC_LEN - m->m_len);
   7195 				m->m_len = 0;
   7196 			} else
   7197 				m->m_len -= ETHER_CRC_LEN;
   7198 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7199 		} else
   7200 			len = rxq->rxq_len;
   7201 
   7202 		WM_RXCHAIN_LINK(rxq, m);
   7203 
   7204 		*rxq->rxq_tailp = NULL;
   7205 		m = rxq->rxq_head;
   7206 
   7207 		WM_RXCHAIN_RESET(rxq);
   7208 
   7209 		DPRINTF(WM_DEBUG_RX,
   7210 		    ("%s: RX: have entire packet, len -> %d\n",
   7211 		    device_xname(sc->sc_dev), len));
   7212 
   7213 		/* If an error occurred, update stats and drop the packet. */
   7214 		if (errors &
   7215 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7216 			if (errors & WRX_ER_SE)
   7217 				log(LOG_WARNING, "%s: symbol error\n",
   7218 				    device_xname(sc->sc_dev));
   7219 			else if (errors & WRX_ER_SEQ)
   7220 				log(LOG_WARNING, "%s: receive sequence error\n",
   7221 				    device_xname(sc->sc_dev));
   7222 			else if (errors & WRX_ER_CE)
   7223 				log(LOG_WARNING, "%s: CRC error\n",
   7224 				    device_xname(sc->sc_dev));
   7225 			m_freem(m);
   7226 			continue;
   7227 		}
   7228 
   7229 		/* No errors.  Receive the packet. */
   7230 		m->m_pkthdr.rcvif = ifp;
   7231 		m->m_pkthdr.len = len;
   7232 
   7233 		/*
   7234 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7235 		 * for us.  Associate the tag with the packet.
   7236 		 */
   7237 		/* XXXX should check for i350 and i354 */
   7238 		if ((status & WRX_ST_VP) != 0) {
   7239 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7240 		}
   7241 
   7242 		/* Set up checksum info for this packet. */
   7243 		if ((status & WRX_ST_IXSM) == 0) {
   7244 			if (status & WRX_ST_IPCS) {
   7245 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   7246 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7247 				if (errors & WRX_ER_IPE)
   7248 					m->m_pkthdr.csum_flags |=
   7249 					    M_CSUM_IPv4_BAD;
   7250 			}
   7251 			if (status & WRX_ST_TCPCS) {
   7252 				/*
   7253 				 * Note: we don't know if this was TCP or UDP,
   7254 				 * so we just set both bits, and expect the
   7255 				 * upper layers to deal.
   7256 				 */
   7257 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   7258 				m->m_pkthdr.csum_flags |=
   7259 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7260 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7261 				if (errors & WRX_ER_TCPE)
   7262 					m->m_pkthdr.csum_flags |=
   7263 					    M_CSUM_TCP_UDP_BAD;
   7264 			}
   7265 		}
   7266 
   7267 		ifp->if_ipackets++;
   7268 
   7269 		WM_RX_UNLOCK(rxq);
   7270 
   7271 		/* Pass this up to any BPF listeners. */
   7272 		bpf_mtap(ifp, m);
   7273 
   7274 		/* Pass it on. */
   7275 		if_percpuq_enqueue(sc->sc_ipq, m);
   7276 
   7277 		WM_RX_LOCK(rxq);
   7278 
   7279 		if (sc->sc_stopping)
   7280 			break;
   7281 	}
   7282 
   7283 	/* Update the receive pointer. */
   7284 	rxq->rxq_ptr = i;
   7285 	if (count != 0)
   7286 		rnd_add_uint32(&sc->rnd_source, count);
   7287 
   7288 	DPRINTF(WM_DEBUG_RX,
   7289 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7290 }
   7291 
   7292 /*
   7293  * wm_linkintr_gmii:
   7294  *
   7295  *	Helper; handle link interrupts for GMII.
   7296  */
   7297 static void
   7298 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7299 {
   7300 
   7301 	KASSERT(WM_CORE_LOCKED(sc));
   7302 
   7303 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7304 		__func__));
   7305 
   7306 	if (icr & ICR_LSC) {
   7307 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7308 
   7309 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7310 			wm_gig_downshift_workaround_ich8lan(sc);
   7311 
   7312 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7313 			device_xname(sc->sc_dev)));
   7314 		mii_pollstat(&sc->sc_mii);
   7315 		if (sc->sc_type == WM_T_82543) {
   7316 			int miistatus, active;
   7317 
   7318 			/*
   7319 			 * With 82543, we need to force speed and
   7320 			 * duplex on the MAC equal to what the PHY
   7321 			 * speed and duplex configuration is.
   7322 			 */
   7323 			miistatus = sc->sc_mii.mii_media_status;
   7324 
   7325 			if (miistatus & IFM_ACTIVE) {
   7326 				active = sc->sc_mii.mii_media_active;
   7327 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7328 				switch (IFM_SUBTYPE(active)) {
   7329 				case IFM_10_T:
   7330 					sc->sc_ctrl |= CTRL_SPEED_10;
   7331 					break;
   7332 				case IFM_100_TX:
   7333 					sc->sc_ctrl |= CTRL_SPEED_100;
   7334 					break;
   7335 				case IFM_1000_T:
   7336 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7337 					break;
   7338 				default:
   7339 					/*
   7340 					 * fiber?
    7341 					 * Should not enter here.
   7342 					 */
   7343 					printf("unknown media (%x)\n", active);
   7344 					break;
   7345 				}
   7346 				if (active & IFM_FDX)
   7347 					sc->sc_ctrl |= CTRL_FD;
   7348 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7349 			}
   7350 		} else if ((sc->sc_type == WM_T_ICH8)
   7351 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7352 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7353 		} else if (sc->sc_type == WM_T_PCH) {
   7354 			wm_k1_gig_workaround_hv(sc,
   7355 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7356 		}
   7357 
   7358 		if ((sc->sc_phytype == WMPHY_82578)
   7359 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7360 			== IFM_1000_T)) {
   7361 
   7362 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7363 				delay(200*1000); /* XXX too big */
   7364 
   7365 				/* Link stall fix for link up */
   7366 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7367 				    HV_MUX_DATA_CTRL,
   7368 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7369 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7370 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7371 				    HV_MUX_DATA_CTRL,
   7372 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7373 			}
   7374 		}
   7375 	} else if (icr & ICR_RXSEQ) {
   7376 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7377 			device_xname(sc->sc_dev)));
   7378 	}
   7379 }
   7380 
   7381 /*
   7382  * wm_linkintr_tbi:
   7383  *
   7384  *	Helper; handle link interrupts for TBI mode.
   7385  */
   7386 static void
   7387 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7388 {
   7389 	uint32_t status;
   7390 
   7391 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7392 		__func__));
   7393 
   7394 	status = CSR_READ(sc, WMREG_STATUS);
   7395 	if (icr & ICR_LSC) {
   7396 		if (status & STATUS_LU) {
   7397 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7398 			    device_xname(sc->sc_dev),
   7399 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7400 			/*
   7401 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7402 			 * so we should update sc->sc_ctrl
   7403 			 */
   7404 
   7405 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7406 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7407 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7408 			if (status & STATUS_FD)
   7409 				sc->sc_tctl |=
   7410 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7411 			else
   7412 				sc->sc_tctl |=
   7413 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7414 			if (sc->sc_ctrl & CTRL_TFCE)
   7415 				sc->sc_fcrtl |= FCRTL_XONE;
   7416 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7417 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7418 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7419 				      sc->sc_fcrtl);
   7420 			sc->sc_tbi_linkup = 1;
   7421 		} else {
   7422 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7423 			    device_xname(sc->sc_dev)));
   7424 			sc->sc_tbi_linkup = 0;
   7425 		}
   7426 		/* Update LED */
   7427 		wm_tbi_serdes_set_linkled(sc);
   7428 	} else if (icr & ICR_RXSEQ) {
   7429 		DPRINTF(WM_DEBUG_LINK,
   7430 		    ("%s: LINK: Receive sequence error\n",
   7431 		    device_xname(sc->sc_dev)));
   7432 	}
   7433 }
   7434 
   7435 /*
   7436  * wm_linkintr_serdes:
   7437  *
    7438  *	Helper; handle link interrupts for SERDES mode.
   7439  */
   7440 static void
   7441 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7442 {
   7443 	struct mii_data *mii = &sc->sc_mii;
   7444 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7445 	uint32_t pcs_adv, pcs_lpab, reg;
   7446 
   7447 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7448 		__func__));
   7449 
   7450 	if (icr & ICR_LSC) {
   7451 		/* Check PCS */
   7452 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7453 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7454 			mii->mii_media_status |= IFM_ACTIVE;
   7455 			sc->sc_tbi_linkup = 1;
   7456 		} else {
   7457 			mii->mii_media_status |= IFM_NONE;
   7458 			sc->sc_tbi_linkup = 0;
   7459 			wm_tbi_serdes_set_linkled(sc);
   7460 			return;
   7461 		}
   7462 		mii->mii_media_active |= IFM_1000_SX;
   7463 		if ((reg & PCS_LSTS_FDX) != 0)
   7464 			mii->mii_media_active |= IFM_FDX;
   7465 		else
   7466 			mii->mii_media_active |= IFM_HDX;
   7467 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7468 			/* Check flow */
   7469 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7470 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7471 				DPRINTF(WM_DEBUG_LINK,
   7472 				    ("XXX LINKOK but not ACOMP\n"));
   7473 				return;
   7474 			}
   7475 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7476 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7477 			DPRINTF(WM_DEBUG_LINK,
   7478 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
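         			/*
         			 * Resolve flow control from the negotiated
         			 * symmetric/asymmetric pause bits, as in IEEE
         			 * 802.3 Annex 28B.
         			 */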
   7479 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7480 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7481 				mii->mii_media_active |= IFM_FLOW
   7482 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7483 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7484 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7485 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7486 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7487 				mii->mii_media_active |= IFM_FLOW
   7488 				    | IFM_ETH_TXPAUSE;
   7489 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7490 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7491 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7492 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7493 				mii->mii_media_active |= IFM_FLOW
   7494 				    | IFM_ETH_RXPAUSE;
   7495 		}
   7496 		/* Update LED */
   7497 		wm_tbi_serdes_set_linkled(sc);
   7498 	} else {
   7499 		DPRINTF(WM_DEBUG_LINK,
   7500 		    ("%s: LINK: Receive sequence error\n",
   7501 		    device_xname(sc->sc_dev)));
   7502 	}
   7503 }
   7504 
   7505 /*
   7506  * wm_linkintr:
   7507  *
   7508  *	Helper; handle link interrupts.
   7509  */
   7510 static void
   7511 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7512 {
   7513 
   7514 	KASSERT(WM_CORE_LOCKED(sc));
   7515 
   7516 	if (sc->sc_flags & WM_F_HAS_MII)
   7517 		wm_linkintr_gmii(sc, icr);
   7518 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7519 	    && (sc->sc_type >= WM_T_82575))
   7520 		wm_linkintr_serdes(sc, icr);
   7521 	else
   7522 		wm_linkintr_tbi(sc, icr);
   7523 }
   7524 
   7525 /*
   7526  * wm_intr_legacy:
   7527  *
   7528  *	Interrupt service routine for INTx and MSI.
   7529  */
   7530 static int
   7531 wm_intr_legacy(void *arg)
   7532 {
   7533 	struct wm_softc *sc = arg;
   7534 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7535 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7536 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7537 	uint32_t icr, rndval = 0;
   7538 	int handled = 0;
   7539 
   7540 	DPRINTF(WM_DEBUG_TX,
   7541 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7542 	while (1 /* CONSTCOND */) {
   7543 		icr = CSR_READ(sc, WMREG_ICR);
   7544 		if ((icr & sc->sc_icr) == 0)
   7545 			break;
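         		/* Remember the first ICR value for the entropy pool. */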
   7546 		if (rndval == 0)
   7547 			rndval = icr;
   7548 
   7549 		WM_RX_LOCK(rxq);
   7550 
   7551 		if (sc->sc_stopping) {
   7552 			WM_RX_UNLOCK(rxq);
   7553 			break;
   7554 		}
   7555 
   7556 		handled = 1;
   7557 
   7558 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7559 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7560 			DPRINTF(WM_DEBUG_RX,
   7561 			    ("%s: RX: got Rx intr 0x%08x\n",
   7562 			    device_xname(sc->sc_dev),
   7563 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7564 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7565 		}
   7566 #endif
   7567 		wm_rxeof(rxq);
   7568 
   7569 		WM_RX_UNLOCK(rxq);
   7570 		WM_TX_LOCK(txq);
   7571 
   7572 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7573 		if (icr & ICR_TXDW) {
   7574 			DPRINTF(WM_DEBUG_TX,
   7575 			    ("%s: TX: got TXDW interrupt\n",
   7576 			    device_xname(sc->sc_dev)));
   7577 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7578 		}
   7579 #endif
   7580 		wm_txeof(sc, txq);
   7581 
   7582 		WM_TX_UNLOCK(txq);
   7583 		WM_CORE_LOCK(sc);
   7584 
   7585 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7586 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7587 			wm_linkintr(sc, icr);
   7588 		}
   7589 
   7590 		WM_CORE_UNLOCK(sc);
   7591 
   7592 		if (icr & ICR_RXO) {
   7593 #if defined(WM_DEBUG)
   7594 			log(LOG_WARNING, "%s: Receive overrun\n",
   7595 			    device_xname(sc->sc_dev));
   7596 #endif /* defined(WM_DEBUG) */
   7597 		}
   7598 	}
   7599 
   7600 	rnd_add_uint32(&sc->rnd_source, rndval);
   7601 
   7602 	if (handled) {
   7603 		/* Try to get more packets going. */
   7604 		ifp->if_start(ifp);
   7605 	}
   7606 
   7607 	return handled;
   7608 }
   7609 
   7610 static int
   7611 wm_txrxintr_msix(void *arg)
   7612 {
   7613 	struct wm_queue *wmq = arg;
   7614 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7615 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7616 	struct wm_softc *sc = txq->txq_sc;
   7617 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7618 
   7619 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7620 
   7621 	DPRINTF(WM_DEBUG_TX,
   7622 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7623 
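         	/*
         	 * Mask this queue's interrupts while we process them; they are
         	 * re-enabled at the end of the handler.
         	 */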
   7624 	if (sc->sc_type == WM_T_82574)
   7625 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7626 	else if (sc->sc_type == WM_T_82575)
   7627 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7628 	else
   7629 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   7630 
   7631 	if (!sc->sc_stopping) {
   7632 		WM_TX_LOCK(txq);
   7633 
   7634 		WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7635 		wm_txeof(sc, txq);
   7636 
   7637 		/* Try to get more packets going. */
   7638 		if (pcq_peek(txq->txq_interq) != NULL)
   7639 			wm_nq_transmit_locked(ifp, txq);
   7640 		/*
   7641 		 * There are still some upper layer processing which call
   7642 		 * ifp->if_start(). e.g. ALTQ
   7643 		 */
   7644 		if (wmq->wmq_id == 0) {
   7645 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7646 				wm_nq_start_locked(ifp);
   7647 		}
   7648 		WM_TX_UNLOCK(txq);
   7649 	}
   7650 
   7651 	DPRINTF(WM_DEBUG_RX,
   7652 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7653 
   7654 	if (!sc->sc_stopping) {
   7655 		WM_RX_LOCK(rxq);
   7656 		WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7657 		wm_rxeof(rxq);
   7658 		WM_RX_UNLOCK(rxq);
   7659 	}
   7660 
   7661 	if (sc->sc_type == WM_T_82574)
   7662 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7663 	else if (sc->sc_type == WM_T_82575)
   7664 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7665 	else
   7666 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7667 
   7668 	return 1;
   7669 }
   7670 
   7671 /*
   7672  * wm_linkintr_msix:
   7673  *
   7674  *	Interrupt service routine for link status change for MSI-X.
   7675  */
   7676 static int
   7677 wm_linkintr_msix(void *arg)
   7678 {
   7679 	struct wm_softc *sc = arg;
   7680 	uint32_t reg;
   7681 
   7682 	DPRINTF(WM_DEBUG_LINK,
   7683 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7684 
   7685 	reg = CSR_READ(sc, WMREG_ICR);
   7686 	WM_CORE_LOCK(sc);
   7687 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7688 		goto out;
   7689 
   7690 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7691 	wm_linkintr(sc, ICR_LSC);
   7692 
   7693 out:
   7694 	WM_CORE_UNLOCK(sc);
   7695 
   7696 	if (sc->sc_type == WM_T_82574)
   7697 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7698 	else if (sc->sc_type == WM_T_82575)
   7699 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7700 	else
   7701 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7702 
   7703 	return 1;
   7704 }
   7705 
   7706 /*
   7707  * Media related.
   7708  * GMII, SGMII, TBI (and SERDES)
   7709  */
   7710 
   7711 /* Common */
   7712 
   7713 /*
   7714  * wm_tbi_serdes_set_linkled:
   7715  *
   7716  *	Update the link LED on TBI and SERDES devices.
   7717  */
   7718 static void
   7719 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7720 {
   7721 
   7722 	if (sc->sc_tbi_linkup)
   7723 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7724 	else
   7725 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7726 
   7727 	/* 82540 or newer devices are active low */
   7728 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7729 
   7730 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7731 }
   7732 
   7733 /* GMII related */
   7734 
   7735 /*
   7736  * wm_gmii_reset:
   7737  *
   7738  *	Reset the PHY.
   7739  */
   7740 static void
   7741 wm_gmii_reset(struct wm_softc *sc)
   7742 {
   7743 	uint32_t reg;
   7744 	int rv;
   7745 
   7746 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7747 		device_xname(sc->sc_dev), __func__));
   7748 	/* get phy semaphore */
   7749 	switch (sc->sc_type) {
   7750 	case WM_T_82571:
   7751 	case WM_T_82572:
   7752 	case WM_T_82573:
   7753 	case WM_T_82574:
   7754 	case WM_T_82583:
   7755 		 /* XXX should get sw semaphore, too */
   7756 		rv = wm_get_swsm_semaphore(sc);
   7757 		break;
   7758 	case WM_T_82575:
   7759 	case WM_T_82576:
   7760 	case WM_T_82580:
   7761 	case WM_T_I350:
   7762 	case WM_T_I354:
   7763 	case WM_T_I210:
   7764 	case WM_T_I211:
   7765 	case WM_T_80003:
   7766 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7767 		break;
   7768 	case WM_T_ICH8:
   7769 	case WM_T_ICH9:
   7770 	case WM_T_ICH10:
   7771 	case WM_T_PCH:
   7772 	case WM_T_PCH2:
   7773 	case WM_T_PCH_LPT:
   7774 	case WM_T_PCH_SPT:
   7775 		rv = wm_get_swfwhw_semaphore(sc);
   7776 		break;
   7777 	default:
    7778 		/* nothing to do */
   7779 		rv = 0;
   7780 		break;
   7781 	}
   7782 	if (rv != 0) {
   7783 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7784 		    __func__);
   7785 		return;
   7786 	}
   7787 
   7788 	switch (sc->sc_type) {
   7789 	case WM_T_82542_2_0:
   7790 	case WM_T_82542_2_1:
   7791 		/* null */
   7792 		break;
   7793 	case WM_T_82543:
   7794 		/*
   7795 		 * With 82543, we need to force speed and duplex on the MAC
   7796 		 * equal to what the PHY speed and duplex configuration is.
   7797 		 * In addition, we need to perform a hardware reset on the PHY
   7798 		 * to take it out of reset.
   7799 		 */
   7800 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7801 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7802 
   7803 		/* The PHY reset pin is active-low. */
   7804 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7805 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7806 		    CTRL_EXT_SWDPIN(4));
   7807 		reg |= CTRL_EXT_SWDPIO(4);
   7808 
   7809 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7810 		CSR_WRITE_FLUSH(sc);
   7811 		delay(10*1000);
   7812 
   7813 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7814 		CSR_WRITE_FLUSH(sc);
   7815 		delay(150);
   7816 #if 0
   7817 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7818 #endif
   7819 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7820 		break;
   7821 	case WM_T_82544:	/* reset 10000us */
   7822 	case WM_T_82540:
   7823 	case WM_T_82545:
   7824 	case WM_T_82545_3:
   7825 	case WM_T_82546:
   7826 	case WM_T_82546_3:
   7827 	case WM_T_82541:
   7828 	case WM_T_82541_2:
   7829 	case WM_T_82547:
   7830 	case WM_T_82547_2:
   7831 	case WM_T_82571:	/* reset 100us */
   7832 	case WM_T_82572:
   7833 	case WM_T_82573:
   7834 	case WM_T_82574:
   7835 	case WM_T_82575:
   7836 	case WM_T_82576:
   7837 	case WM_T_82580:
   7838 	case WM_T_I350:
   7839 	case WM_T_I354:
   7840 	case WM_T_I210:
   7841 	case WM_T_I211:
   7842 	case WM_T_82583:
   7843 	case WM_T_80003:
   7844 		/* generic reset */
   7845 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7846 		CSR_WRITE_FLUSH(sc);
   7847 		delay(20000);
   7848 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7849 		CSR_WRITE_FLUSH(sc);
   7850 		delay(20000);
   7851 
   7852 		if ((sc->sc_type == WM_T_82541)
   7853 		    || (sc->sc_type == WM_T_82541_2)
   7854 		    || (sc->sc_type == WM_T_82547)
   7855 		    || (sc->sc_type == WM_T_82547_2)) {
    7856 			/* workarounds for igp are done in igp_reset() */
   7857 			/* XXX add code to set LED after phy reset */
   7858 		}
   7859 		break;
   7860 	case WM_T_ICH8:
   7861 	case WM_T_ICH9:
   7862 	case WM_T_ICH10:
   7863 	case WM_T_PCH:
   7864 	case WM_T_PCH2:
   7865 	case WM_T_PCH_LPT:
   7866 	case WM_T_PCH_SPT:
   7867 		/* generic reset */
   7868 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7869 		CSR_WRITE_FLUSH(sc);
   7870 		delay(100);
   7871 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7872 		CSR_WRITE_FLUSH(sc);
   7873 		delay(150);
   7874 		break;
   7875 	default:
   7876 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7877 		    __func__);
   7878 		break;
   7879 	}
   7880 
   7881 	/* release PHY semaphore */
   7882 	switch (sc->sc_type) {
   7883 	case WM_T_82571:
   7884 	case WM_T_82572:
   7885 	case WM_T_82573:
   7886 	case WM_T_82574:
   7887 	case WM_T_82583:
   7888 		 /* XXX should put sw semaphore, too */
   7889 		wm_put_swsm_semaphore(sc);
   7890 		break;
   7891 	case WM_T_82575:
   7892 	case WM_T_82576:
   7893 	case WM_T_82580:
   7894 	case WM_T_I350:
   7895 	case WM_T_I354:
   7896 	case WM_T_I210:
   7897 	case WM_T_I211:
   7898 	case WM_T_80003:
   7899 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7900 		break;
   7901 	case WM_T_ICH8:
   7902 	case WM_T_ICH9:
   7903 	case WM_T_ICH10:
   7904 	case WM_T_PCH:
   7905 	case WM_T_PCH2:
   7906 	case WM_T_PCH_LPT:
   7907 	case WM_T_PCH_SPT:
   7908 		wm_put_swfwhw_semaphore(sc);
   7909 		break;
   7910 	default:
   7911 		/* nothing to do */
   7912 		rv = 0;
   7913 		break;
   7914 	}
   7915 
   7916 	/* get_cfg_done */
   7917 	wm_get_cfg_done(sc);
   7918 
   7919 	/* extra setup */
   7920 	switch (sc->sc_type) {
   7921 	case WM_T_82542_2_0:
   7922 	case WM_T_82542_2_1:
   7923 	case WM_T_82543:
   7924 	case WM_T_82544:
   7925 	case WM_T_82540:
   7926 	case WM_T_82545:
   7927 	case WM_T_82545_3:
   7928 	case WM_T_82546:
   7929 	case WM_T_82546_3:
   7930 	case WM_T_82541_2:
   7931 	case WM_T_82547_2:
   7932 	case WM_T_82571:
   7933 	case WM_T_82572:
   7934 	case WM_T_82573:
   7935 	case WM_T_82575:
   7936 	case WM_T_82576:
   7937 	case WM_T_82580:
   7938 	case WM_T_I350:
   7939 	case WM_T_I354:
   7940 	case WM_T_I210:
   7941 	case WM_T_I211:
   7942 	case WM_T_80003:
   7943 		/* null */
   7944 		break;
   7945 	case WM_T_82574:
   7946 	case WM_T_82583:
   7947 		wm_lplu_d0_disable(sc);
   7948 		break;
   7949 	case WM_T_82541:
   7950 	case WM_T_82547:
    7951 		/* XXX Actively configure the LED after PHY reset */
   7952 		break;
   7953 	case WM_T_ICH8:
   7954 	case WM_T_ICH9:
   7955 	case WM_T_ICH10:
   7956 	case WM_T_PCH:
   7957 	case WM_T_PCH2:
   7958 	case WM_T_PCH_LPT:
   7959 	case WM_T_PCH_SPT:
    7960 		/* Allow time for h/w to get to a quiescent state after reset */
   7961 		delay(10*1000);
   7962 
   7963 		if (sc->sc_type == WM_T_PCH)
   7964 			wm_hv_phy_workaround_ich8lan(sc);
   7965 
   7966 		if (sc->sc_type == WM_T_PCH2)
   7967 			wm_lv_phy_workaround_ich8lan(sc);
   7968 
   7969 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7970 			/*
   7971 			 * dummy read to clear the phy wakeup bit after lcd
   7972 			 * reset
   7973 			 */
   7974 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7975 		}
   7976 
   7977 		/*
    7978 		 * XXX Configure the LCD with the extended configuration region
   7979 		 * in NVM
   7980 		 */
   7981 
   7982 		/* Disable D0 LPLU. */
   7983 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   7984 			wm_lplu_d0_disable_pch(sc);
   7985 		else
   7986 			wm_lplu_d0_disable(sc);	/* ICH* */
   7987 		break;
   7988 	default:
   7989 		panic("%s: unknown type\n", __func__);
   7990 		break;
   7991 	}
   7992 }
   7993 
   7994 /*
   7995  * wm_get_phy_id_82575:
   7996  *
   7997  * Return PHY ID. Return -1 if it failed.
   7998  */
   7999 static int
   8000 wm_get_phy_id_82575(struct wm_softc *sc)
   8001 {
   8002 	uint32_t reg;
   8003 	int phyid = -1;
   8004 
   8005 	/* XXX */
   8006 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8007 		return -1;
   8008 
   8009 	if (wm_sgmii_uses_mdio(sc)) {
   8010 		switch (sc->sc_type) {
   8011 		case WM_T_82575:
   8012 		case WM_T_82576:
   8013 			reg = CSR_READ(sc, WMREG_MDIC);
   8014 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8015 			break;
   8016 		case WM_T_82580:
   8017 		case WM_T_I350:
   8018 		case WM_T_I354:
   8019 		case WM_T_I210:
   8020 		case WM_T_I211:
   8021 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8022 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8023 			break;
   8024 		default:
   8025 			return -1;
   8026 		}
   8027 	}
   8028 
   8029 	return phyid;
   8030 }
   8031 
   8032 
   8033 /*
   8034  * wm_gmii_mediainit:
   8035  *
   8036  *	Initialize media for use on 1000BASE-T devices.
   8037  */
   8038 static void
   8039 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8040 {
   8041 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8042 	struct mii_data *mii = &sc->sc_mii;
   8043 	uint32_t reg;
   8044 
   8045 	/* We have GMII. */
   8046 	sc->sc_flags |= WM_F_HAS_MII;
   8047 
   8048 	if (sc->sc_type == WM_T_80003)
   8049 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8050 	else
   8051 		sc->sc_tipg = TIPG_1000T_DFLT;
   8052 
   8053 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8054 	if ((sc->sc_type == WM_T_82580)
   8055 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8056 	    || (sc->sc_type == WM_T_I211)) {
   8057 		reg = CSR_READ(sc, WMREG_PHPM);
   8058 		reg &= ~PHPM_GO_LINK_D;
   8059 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8060 	}
   8061 
   8062 	/*
   8063 	 * Let the chip set speed/duplex on its own based on
   8064 	 * signals from the PHY.
   8065 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8066 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8067 	 */
   8068 	sc->sc_ctrl |= CTRL_SLU;
   8069 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8070 
   8071 	/* Initialize our media structures and probe the GMII. */
   8072 	mii->mii_ifp = ifp;
   8073 
   8074 	/*
   8075 	 * Determine the PHY access method.
   8076 	 *
   8077 	 *  For SGMII, use SGMII specific method.
   8078 	 *
   8079 	 *  For some devices, we can determine the PHY access method
   8080 	 * from sc_type.
   8081 	 *
   8082 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8083 	 * access method from sc_type, so use the PCI product ID for some
    8084 	 * devices.
    8085 	 *  For other ICH8 variants, try the igp method first. If the PHY
    8086 	 * can't be detected that way, fall back to the bm method.
   8087 	 */
   8088 	switch (prodid) {
   8089 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8090 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8091 		/* 82577 */
   8092 		sc->sc_phytype = WMPHY_82577;
   8093 		break;
   8094 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8095 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8096 		/* 82578 */
   8097 		sc->sc_phytype = WMPHY_82578;
   8098 		break;
   8099 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8100 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8101 		/* 82579 */
   8102 		sc->sc_phytype = WMPHY_82579;
   8103 		break;
   8104 	case PCI_PRODUCT_INTEL_82801I_BM:
   8105 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8106 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8107 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8108 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8109 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8110 		/* 82567 */
   8111 		sc->sc_phytype = WMPHY_BM;
   8112 		mii->mii_readreg = wm_gmii_bm_readreg;
   8113 		mii->mii_writereg = wm_gmii_bm_writereg;
   8114 		break;
   8115 	default:
   8116 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    8117 		    && !wm_sgmii_uses_mdio(sc)) {
   8118 			/* SGMII */
   8119 			mii->mii_readreg = wm_sgmii_readreg;
   8120 			mii->mii_writereg = wm_sgmii_writereg;
   8121 		} else if (sc->sc_type >= WM_T_80003) {
   8122 			/* 80003 */
   8123 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8124 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8125 		} else if (sc->sc_type >= WM_T_I210) {
   8126 			/* I210 and I211 */
   8127 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8128 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8129 		} else if (sc->sc_type >= WM_T_82580) {
   8130 			/* 82580, I350 and I354 */
   8131 			sc->sc_phytype = WMPHY_82580;
   8132 			mii->mii_readreg = wm_gmii_82580_readreg;
   8133 			mii->mii_writereg = wm_gmii_82580_writereg;
   8134 		} else if (sc->sc_type >= WM_T_82544) {
    8135 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8136 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8137 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8138 		} else {
   8139 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8140 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8141 		}
   8142 		break;
   8143 	}
   8144 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8145 		/* All PCH* use _hv_ */
   8146 		mii->mii_readreg = wm_gmii_hv_readreg;
   8147 		mii->mii_writereg = wm_gmii_hv_writereg;
   8148 	}
   8149 	mii->mii_statchg = wm_gmii_statchg;
   8150 
   8151 	wm_gmii_reset(sc);
   8152 
   8153 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8154 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8155 	    wm_gmii_mediastatus);
   8156 
   8157 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8158 	    || (sc->sc_type == WM_T_82580)
   8159 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8160 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8161 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8162 			/* Attach only one port */
   8163 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8164 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8165 		} else {
   8166 			int i, id;
   8167 			uint32_t ctrl_ext;
   8168 
   8169 			id = wm_get_phy_id_82575(sc);
   8170 			if (id != -1) {
   8171 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8172 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8173 			}
   8174 			if ((id == -1)
   8175 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8176 				/* Power on sgmii phy if it is disabled */
   8177 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8178 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8179 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8180 				CSR_WRITE_FLUSH(sc);
   8181 				delay(300*1000); /* XXX too long */
   8182 
    8183 				/* Try PHY addresses 1 through 7 */
   8184 				for (i = 1; i < 8; i++)
   8185 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8186 					    0xffffffff, i, MII_OFFSET_ANY,
   8187 					    MIIF_DOPAUSE);
   8188 
   8189 				/* restore previous sfp cage power state */
   8190 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8191 			}
   8192 		}
   8193 	} else {
   8194 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8195 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8196 	}
   8197 
   8198 	/*
   8199 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8200 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8201 	 */
   8202 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8203 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8204 		wm_set_mdio_slow_mode_hv(sc);
   8205 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8206 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8207 	}
   8208 
   8209 	/*
   8210 	 * (For ICH8 variants)
   8211 	 * If PHY detection failed, use BM's r/w function and retry.
   8212 	 */
   8213 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8214 		/* if failed, retry with *_bm_* */
   8215 		mii->mii_readreg = wm_gmii_bm_readreg;
   8216 		mii->mii_writereg = wm_gmii_bm_writereg;
   8217 
   8218 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8219 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8220 	}
   8221 
   8222 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8223 		/* No PHY was found */
   8224 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8225 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8226 		sc->sc_phytype = WMPHY_NONE;
   8227 	} else {
   8228 		/*
   8229 		 * PHY Found!
   8230 		 * Check PHY type.
   8231 		 */
   8232 		uint32_t model;
   8233 		struct mii_softc *child;
   8234 
   8235 		child = LIST_FIRST(&mii->mii_phys);
   8236 		model = child->mii_mpd_model;
   8237 		if (model == MII_MODEL_yyINTEL_I82566)
   8238 			sc->sc_phytype = WMPHY_IGP_3;
   8239 
   8240 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8241 	}
   8242 }
   8243 
   8244 /*
   8245  * wm_gmii_mediachange:	[ifmedia interface function]
   8246  *
   8247  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8248  */
   8249 static int
   8250 wm_gmii_mediachange(struct ifnet *ifp)
   8251 {
   8252 	struct wm_softc *sc = ifp->if_softc;
   8253 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8254 	int rc;
   8255 
   8256 	if ((ifp->if_flags & IFF_UP) == 0)
   8257 		return 0;
   8258 
   8259 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8260 	sc->sc_ctrl |= CTRL_SLU;
   8261 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8262 	    || (sc->sc_type > WM_T_82543)) {
   8263 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8264 	} else {
   8265 		sc->sc_ctrl &= ~CTRL_ASDE;
   8266 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8267 		if (ife->ifm_media & IFM_FDX)
   8268 			sc->sc_ctrl |= CTRL_FD;
   8269 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8270 		case IFM_10_T:
   8271 			sc->sc_ctrl |= CTRL_SPEED_10;
   8272 			break;
   8273 		case IFM_100_TX:
   8274 			sc->sc_ctrl |= CTRL_SPEED_100;
   8275 			break;
   8276 		case IFM_1000_T:
   8277 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8278 			break;
   8279 		default:
   8280 			panic("wm_gmii_mediachange: bad media 0x%x",
   8281 			    ife->ifm_media);
   8282 		}
   8283 	}
   8284 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8285 	if (sc->sc_type <= WM_T_82543)
   8286 		wm_gmii_reset(sc);
   8287 
   8288 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8289 		return 0;
   8290 	return rc;
   8291 }
   8292 
   8293 /*
   8294  * wm_gmii_mediastatus:	[ifmedia interface function]
   8295  *
   8296  *	Get the current interface media status on a 1000BASE-T device.
   8297  */
   8298 static void
   8299 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8300 {
   8301 	struct wm_softc *sc = ifp->if_softc;
   8302 
   8303 	ether_mediastatus(ifp, ifmr);
   8304 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8305 	    | sc->sc_flowflags;
   8306 }
   8307 
   8308 #define	MDI_IO		CTRL_SWDPIN(2)
   8309 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8310 #define	MDI_CLK		CTRL_SWDPIN(3)
   8311 
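         /*
          * The 82543 bit-bangs MII management frames through the
          * software-definable pins: MDI_IO carries the data, MDI_CLK the
          * clock, and MDI_DIR sets the data pin direction (host -> PHY).
          * Each bit is presented on MDI_IO and then clocked with a
          * high/low pulse on MDI_CLK, with a 10us delay per phase.
          */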
   8312 static void
   8313 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8314 {
   8315 	uint32_t i, v;
   8316 
   8317 	v = CSR_READ(sc, WMREG_CTRL);
   8318 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8319 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8320 
   8321 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8322 		if (data & i)
   8323 			v |= MDI_IO;
   8324 		else
   8325 			v &= ~MDI_IO;
   8326 		CSR_WRITE(sc, WMREG_CTRL, v);
   8327 		CSR_WRITE_FLUSH(sc);
   8328 		delay(10);
   8329 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8330 		CSR_WRITE_FLUSH(sc);
   8331 		delay(10);
   8332 		CSR_WRITE(sc, WMREG_CTRL, v);
   8333 		CSR_WRITE_FLUSH(sc);
   8334 		delay(10);
   8335 	}
   8336 }
   8337 
   8338 static uint32_t
   8339 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8340 {
   8341 	uint32_t v, i, data = 0;
   8342 
   8343 	v = CSR_READ(sc, WMREG_CTRL);
   8344 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8345 	v |= CTRL_SWDPIO(3);
   8346 
   8347 	CSR_WRITE(sc, WMREG_CTRL, v);
   8348 	CSR_WRITE_FLUSH(sc);
   8349 	delay(10);
   8350 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8351 	CSR_WRITE_FLUSH(sc);
   8352 	delay(10);
   8353 	CSR_WRITE(sc, WMREG_CTRL, v);
   8354 	CSR_WRITE_FLUSH(sc);
   8355 	delay(10);
   8356 
   8357 	for (i = 0; i < 16; i++) {
   8358 		data <<= 1;
   8359 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8360 		CSR_WRITE_FLUSH(sc);
   8361 		delay(10);
   8362 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8363 			data |= 1;
   8364 		CSR_WRITE(sc, WMREG_CTRL, v);
   8365 		CSR_WRITE_FLUSH(sc);
   8366 		delay(10);
   8367 	}
   8368 
   8369 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8370 	CSR_WRITE_FLUSH(sc);
   8371 	delay(10);
   8372 	CSR_WRITE(sc, WMREG_CTRL, v);
   8373 	CSR_WRITE_FLUSH(sc);
   8374 	delay(10);
   8375 
   8376 	return data;
   8377 }
   8378 
   8379 #undef MDI_IO
   8380 #undef MDI_DIR
   8381 #undef MDI_CLK
   8382 
   8383 /*
   8384  * wm_gmii_i82543_readreg:	[mii interface function]
   8385  *
   8386  *	Read a PHY register on the GMII (i82543 version).
   8387  */
   8388 static int
   8389 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8390 {
   8391 	struct wm_softc *sc = device_private(self);
   8392 	int rv;
   8393 
   8394 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8395 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8396 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8397 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8398 
   8399 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8400 	    device_xname(sc->sc_dev), phy, reg, rv));
   8401 
   8402 	return rv;
   8403 }
   8404 
   8405 /*
   8406  * wm_gmii_i82543_writereg:	[mii interface function]
   8407  *
   8408  *	Write a PHY register on the GMII (i82543 version).
   8409  */
   8410 static void
   8411 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8412 {
   8413 	struct wm_softc *sc = device_private(self);
   8414 
   8415 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8416 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8417 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8418 	    (MII_COMMAND_START << 30), 32);
   8419 }
   8420 
   8421 /*
   8422  * wm_gmii_i82544_readreg:	[mii interface function]
   8423  *
   8424  *	Read a PHY register on the GMII.
   8425  */
   8426 static int
   8427 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8428 {
   8429 	struct wm_softc *sc = device_private(self);
   8430 	uint32_t mdic = 0;
   8431 	int i, rv;
   8432 
   8433 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8434 	    MDIC_REGADD(reg));
   8435 
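         	/*
         	 * Poll the MDIC register for MDIC_READY, waiting 50us between
         	 * reads, for up to WM_GEN_POLL_TIMEOUT * 3 attempts.
         	 */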
   8436 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8437 		mdic = CSR_READ(sc, WMREG_MDIC);
   8438 		if (mdic & MDIC_READY)
   8439 			break;
   8440 		delay(50);
   8441 	}
   8442 
   8443 	if ((mdic & MDIC_READY) == 0) {
   8444 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8445 		    device_xname(sc->sc_dev), phy, reg);
   8446 		rv = 0;
   8447 	} else if (mdic & MDIC_E) {
   8448 #if 0 /* This is normal if no PHY is present. */
   8449 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8450 		    device_xname(sc->sc_dev), phy, reg);
   8451 #endif
   8452 		rv = 0;
   8453 	} else {
   8454 		rv = MDIC_DATA(mdic);
   8455 		if (rv == 0xffff)
   8456 			rv = 0;
   8457 	}
   8458 
   8459 	return rv;
   8460 }
   8461 
   8462 /*
   8463  * wm_gmii_i82544_writereg:	[mii interface function]
   8464  *
   8465  *	Write a PHY register on the GMII.
   8466  */
   8467 static void
   8468 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8469 {
   8470 	struct wm_softc *sc = device_private(self);
   8471 	uint32_t mdic = 0;
   8472 	int i;
   8473 
   8474 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8475 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8476 
   8477 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8478 		mdic = CSR_READ(sc, WMREG_MDIC);
   8479 		if (mdic & MDIC_READY)
   8480 			break;
   8481 		delay(50);
   8482 	}
   8483 
   8484 	if ((mdic & MDIC_READY) == 0)
   8485 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8486 		    device_xname(sc->sc_dev), phy, reg);
   8487 	else if (mdic & MDIC_E)
   8488 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8489 		    device_xname(sc->sc_dev), phy, reg);
   8490 }
   8491 
   8492 /*
   8493  * wm_gmii_i80003_readreg:	[mii interface function]
   8494  *
    8495  *	Read a PHY register on the Kumeran bus (80003).
    8496  * This could be handled by the PHY layer if we didn't have to lock the
    8497  * resource ...
   8498  */
   8499 static int
   8500 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8501 {
   8502 	struct wm_softc *sc = device_private(self);
   8503 	int sem;
   8504 	int rv;
   8505 
   8506 	if (phy != 1) /* only one PHY on kumeran bus */
   8507 		return 0;
   8508 
   8509 	sem = swfwphysem[sc->sc_funcid];
   8510 	if (wm_get_swfw_semaphore(sc, sem)) {
   8511 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8512 		    __func__);
   8513 		return 0;
   8514 	}
   8515 
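         	/*
         	 * GG82563 registers are paged: the upper bits of 'reg' select
         	 * the page and the low bits address the register within that
         	 * page. The page is written to GG82563_PHY_PAGE_SELECT, or to
         	 * the alternate page-select register when the in-page offset
         	 * is at or above GG82563_MIN_ALT_REG.
         	 */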
   8516 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8517 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8518 		    reg >> GG82563_PAGE_SHIFT);
   8519 	} else {
   8520 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8521 		    reg >> GG82563_PAGE_SHIFT);
   8522 	}
    8523 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8524 	delay(200);
   8525 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8526 	delay(200);
   8527 
   8528 	wm_put_swfw_semaphore(sc, sem);
   8529 	return rv;
   8530 }
   8531 
   8532 /*
   8533  * wm_gmii_i80003_writereg:	[mii interface function]
   8534  *
    8535  *	Write a PHY register on the Kumeran bus (80003).
    8536  * This could be handled by the PHY layer if we didn't have to lock the
    8537  * resource ...
   8538  */
   8539 static void
   8540 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8541 {
   8542 	struct wm_softc *sc = device_private(self);
   8543 	int sem;
   8544 
   8545 	if (phy != 1) /* only one PHY on kumeran bus */
   8546 		return;
   8547 
   8548 	sem = swfwphysem[sc->sc_funcid];
   8549 	if (wm_get_swfw_semaphore(sc, sem)) {
   8550 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8551 		    __func__);
   8552 		return;
   8553 	}
   8554 
   8555 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8556 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8557 		    reg >> GG82563_PAGE_SHIFT);
   8558 	} else {
   8559 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8560 		    reg >> GG82563_PAGE_SHIFT);
   8561 	}
    8562 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8563 	delay(200);
   8564 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8565 	delay(200);
   8566 
   8567 	wm_put_swfw_semaphore(sc, sem);
   8568 }
   8569 
   8570 /*
   8571  * wm_gmii_bm_readreg:	[mii interface function]
   8572  *
    8573  *	Read a PHY register on the BM PHY (82567 on ICH8/9/10).
    8574  * This could be handled by the PHY layer if we didn't have to lock the
    8575  * resource ...
   8576  */
   8577 static int
   8578 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8579 {
   8580 	struct wm_softc *sc = device_private(self);
   8581 	int sem;
   8582 	int rv;
   8583 
   8584 	sem = swfwphysem[sc->sc_funcid];
   8585 	if (wm_get_swfw_semaphore(sc, sem)) {
   8586 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8587 		    __func__);
   8588 		return 0;
   8589 	}
   8590 
   8591 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8592 		if (phy == 1)
   8593 			wm_gmii_i82544_writereg(self, phy,
   8594 			    MII_IGPHY_PAGE_SELECT, reg);
   8595 		else
   8596 			wm_gmii_i82544_writereg(self, phy,
   8597 			    GG82563_PHY_PAGE_SELECT,
   8598 			    reg >> GG82563_PAGE_SHIFT);
   8599 	}
   8600 
   8601 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8602 	wm_put_swfw_semaphore(sc, sem);
   8603 	return rv;
   8604 }
   8605 
   8606 /*
   8607  * wm_gmii_bm_writereg:	[mii interface function]
   8608  *
    8609  *	Write a PHY register on the BM PHY (82567 on ICH8/9/10).
    8610  * This could be handled by the PHY layer if we didn't have to lock the
    8611  * resource ...
   8612  */
   8613 static void
   8614 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8615 {
   8616 	struct wm_softc *sc = device_private(self);
   8617 	int sem;
   8618 
   8619 	sem = swfwphysem[sc->sc_funcid];
   8620 	if (wm_get_swfw_semaphore(sc, sem)) {
   8621 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8622 		    __func__);
   8623 		return;
   8624 	}
   8625 
   8626 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8627 		if (phy == 1)
   8628 			wm_gmii_i82544_writereg(self, phy,
   8629 			    MII_IGPHY_PAGE_SELECT, reg);
   8630 		else
   8631 			wm_gmii_i82544_writereg(self, phy,
   8632 			    GG82563_PHY_PAGE_SELECT,
   8633 			    reg >> GG82563_PAGE_SHIFT);
   8634 	}
   8635 
   8636 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8637 	wm_put_swfw_semaphore(sc, sem);
   8638 }
   8639 
   8640 static void
   8641 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8642 {
   8643 	struct wm_softc *sc = device_private(self);
   8644 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8645 	uint16_t wuce;
   8646 
   8647 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8648 	if (sc->sc_type == WM_T_PCH) {
    8649 		/* XXX The e1000 driver does nothing here... why? */
   8650 	}
   8651 
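         	/*
         	 * Access sequence for the wakeup registers on page 800:
         	 *  1. select page 769 and set the WU-register enable bit,
         	 *  2. select page 800,
         	 *  3. write the register number to BM_WUC_ADDRESS_OPCODE,
         	 *  4. read or write the data through BM_WUC_DATA_OPCODE,
         	 *  5. re-select page 769 and restore the original enable bits.
         	 */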
   8652 	/* Set page 769 */
   8653 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8654 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8655 
   8656 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8657 
   8658 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8659 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8660 	    wuce | BM_WUC_ENABLE_BIT);
   8661 
   8662 	/* Select page 800 */
   8663 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8664 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8665 
   8666 	/* Write page 800 */
   8667 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8668 
   8669 	if (rd)
   8670 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8671 	else
   8672 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8673 
   8674 	/* Set page 769 */
   8675 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8676 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8677 
   8678 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8679 }
   8680 
   8681 /*
   8682  * wm_gmii_hv_readreg:	[mii interface function]
   8683  *
    8684  *	Read a PHY register on the HV PHY (PCH 82577/82578/82579).
    8685  * This could be handled by the PHY layer if we didn't have to lock the
    8686  * resource ...
   8687  */
   8688 static int
   8689 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8690 {
   8691 	struct wm_softc *sc = device_private(self);
   8692 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8693 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8694 	uint16_t val;
   8695 	int rv;
   8696 
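         	/*
         	 * For BM/HV PHYs, the page number is encoded in the upper bits
         	 * of 'reg' (extracted with BM_PHY_REG_PAGE) and the register
         	 * offset in the lower bits (BM_PHY_REG_NUM).
         	 */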
   8697 	if (wm_get_swfwhw_semaphore(sc)) {
   8698 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8699 		    __func__);
   8700 		return 0;
   8701 	}
   8702 
   8703 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8704 	if (sc->sc_phytype == WMPHY_82577) {
   8705 		/* XXX must write */
   8706 	}
   8707 
   8708 	/* Page 800 works differently than the rest so it has its own func */
   8709 	if (page == BM_WUC_PAGE) {
    8710 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
         		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
    8711 		return val;
   8712 	}
   8713 
   8714 	/*
   8715 	 * Lower than page 768 works differently than the rest so it has its
   8716 	 * own func
   8717 	 */
   8718 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8719 		printf("gmii_hv_readreg!!!\n");
         		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
    8720 		return 0;
   8721 	}
   8722 
   8723 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8724 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8725 		    page << BME1000_PAGE_SHIFT);
   8726 	}
   8727 
   8728 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8729 	wm_put_swfwhw_semaphore(sc);
   8730 	return rv;
   8731 }
   8732 
   8733 /*
   8734  * wm_gmii_hv_writereg:	[mii interface function]
   8735  *
    8736  *	Write a PHY register on the HV PHY (PCH 82577/82578/82579).
    8737  * This could be handled by the PHY layer if we didn't have to lock the
    8738  * resource ...
   8739  */
   8740 static void
   8741 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8742 {
   8743 	struct wm_softc *sc = device_private(self);
   8744 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8745 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8746 
   8747 	if (wm_get_swfwhw_semaphore(sc)) {
   8748 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8749 		    __func__);
   8750 		return;
   8751 	}
   8752 
   8753 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8754 
   8755 	/* Page 800 works differently than the rest so it has its own func */
   8756 	if (page == BM_WUC_PAGE) {
   8757 		uint16_t tmp;
   8758 
   8759 		tmp = val;
    8760 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
         		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
    8761 		return;
   8762 	}
   8763 
   8764 	/*
   8765 	 * Lower than page 768 works differently than the rest so it has its
   8766 	 * own func
   8767 	 */
   8768 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8769 		printf("gmii_hv_writereg!!!\n");
         		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
    8770 		return;
   8771 	}
   8772 
   8773 	/*
   8774 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8775 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8776 	 */
   8777 
   8778 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8779 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8780 		    page << BME1000_PAGE_SHIFT);
   8781 	}
   8782 
   8783 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8784 	wm_put_swfwhw_semaphore(sc);
   8785 }
   8786 
   8787 /*
   8788  * wm_gmii_82580_readreg:	[mii interface function]
   8789  *
    8790  *	Read a PHY register on the 82580, I350 and I354.
    8791  * This could be handled by the PHY layer if we didn't have to lock the
    8792  * resource ...
   8793  */
   8794 static int
   8795 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8796 {
   8797 	struct wm_softc *sc = device_private(self);
   8798 	int sem;
   8799 	int rv;
   8800 
   8801 	sem = swfwphysem[sc->sc_funcid];
   8802 	if (wm_get_swfw_semaphore(sc, sem)) {
   8803 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8804 		    __func__);
   8805 		return 0;
   8806 	}
   8807 
   8808 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8809 
   8810 	wm_put_swfw_semaphore(sc, sem);
   8811 	return rv;
   8812 }
   8813 
   8814 /*
   8815  * wm_gmii_82580_writereg:	[mii interface function]
   8816  *
    8817  *	Write a PHY register on the 82580, I350 and I354.
    8818  * This could be handled by the PHY layer if we didn't have to lock the
    8819  * resource ...
   8820  */
   8821 static void
   8822 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8823 {
   8824 	struct wm_softc *sc = device_private(self);
   8825 	int sem;
   8826 
   8827 	sem = swfwphysem[sc->sc_funcid];
   8828 	if (wm_get_swfw_semaphore(sc, sem)) {
   8829 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8830 		    __func__);
   8831 		return;
   8832 	}
   8833 
   8834 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8835 
   8836 	wm_put_swfw_semaphore(sc, sem);
   8837 }
   8838 
   8839 /*
   8840  * wm_gmii_gs40g_readreg:	[mii interface function]
   8841  *
    8842  *	Read a PHY register on the I210 and I211.
    8843  * This could be handled by the PHY layer if we didn't have to lock the
    8844  * resource ...
   8845  */
   8846 static int
   8847 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8848 {
   8849 	struct wm_softc *sc = device_private(self);
   8850 	int sem;
   8851 	int page, offset;
   8852 	int rv;
   8853 
   8854 	/* Acquire semaphore */
   8855 	sem = swfwphysem[sc->sc_funcid];
   8856 	if (wm_get_swfw_semaphore(sc, sem)) {
   8857 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8858 		    __func__);
   8859 		return 0;
   8860 	}
   8861 
   8862 	/* Page select */
   8863 	page = reg >> GS40G_PAGE_SHIFT;
   8864 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8865 
   8866 	/* Read reg */
   8867 	offset = reg & GS40G_OFFSET_MASK;
   8868 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8869 
   8870 	wm_put_swfw_semaphore(sc, sem);
   8871 	return rv;
   8872 }
   8873 
   8874 /*
   8875  * wm_gmii_gs40g_writereg:	[mii interface function]
   8876  *
   8877  *	Write a PHY register on the I210 and I211.
   8878  * This could be handled by the PHY layer if we didn't have to lock the
    8879  * resource ...
   8880  */
   8881 static void
   8882 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8883 {
   8884 	struct wm_softc *sc = device_private(self);
   8885 	int sem;
   8886 	int page, offset;
   8887 
   8888 	/* Acquire semaphore */
   8889 	sem = swfwphysem[sc->sc_funcid];
   8890 	if (wm_get_swfw_semaphore(sc, sem)) {
   8891 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8892 		    __func__);
   8893 		return;
   8894 	}
   8895 
   8896 	/* Page select */
   8897 	page = reg >> GS40G_PAGE_SHIFT;
   8898 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8899 
   8900 	/* Write reg */
   8901 	offset = reg & GS40G_OFFSET_MASK;
   8902 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8903 
   8904 	/* Release semaphore */
   8905 	wm_put_swfw_semaphore(sc, sem);
   8906 }
   8907 
   8908 /*
   8909  * wm_gmii_statchg:	[mii interface function]
   8910  *
   8911  *	Callback from MII layer when media changes.
   8912  */
   8913 static void
   8914 wm_gmii_statchg(struct ifnet *ifp)
   8915 {
   8916 	struct wm_softc *sc = ifp->if_softc;
   8917 	struct mii_data *mii = &sc->sc_mii;
   8918 
   8919 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8920 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8921 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8922 
   8923 	/*
   8924 	 * Get flow control negotiation result.
   8925 	 */
   8926 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8927 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8928 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8929 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8930 	}
   8931 
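         	/*
         	 * Map the negotiated flow-control result onto the hardware:
         	 * TX pause enables CTRL_TFCE and XON frames (FCRTL_XONE),
         	 * RX pause enables CTRL_RFCE.
         	 */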
   8932 	if (sc->sc_flowflags & IFM_FLOW) {
   8933 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8934 			sc->sc_ctrl |= CTRL_TFCE;
   8935 			sc->sc_fcrtl |= FCRTL_XONE;
   8936 		}
   8937 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8938 			sc->sc_ctrl |= CTRL_RFCE;
   8939 	}
   8940 
   8941 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8942 		DPRINTF(WM_DEBUG_LINK,
   8943 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8944 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8945 	} else {
   8946 		DPRINTF(WM_DEBUG_LINK,
   8947 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8948 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8949 	}
   8950 
   8951 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8952 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8953 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8954 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8955 	if (sc->sc_type == WM_T_80003) {
   8956 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8957 		case IFM_1000_T:
   8958 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8959 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8960 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8961 			break;
   8962 		default:
   8963 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8964 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8965 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8966 			break;
   8967 		}
   8968 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8969 	}
   8970 }
   8971 
   8972 /*
   8973  * wm_kmrn_readreg:
   8974  *
    8975  *	Read a Kumeran register
   8976  */
   8977 static int
   8978 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8979 {
   8980 	int rv;
   8981 
   8982 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8983 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8984 			aprint_error_dev(sc->sc_dev,
   8985 			    "%s: failed to get semaphore\n", __func__);
   8986 			return 0;
   8987 		}
   8988 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8989 		if (wm_get_swfwhw_semaphore(sc)) {
   8990 			aprint_error_dev(sc->sc_dev,
   8991 			    "%s: failed to get semaphore\n", __func__);
   8992 			return 0;
   8993 		}
   8994 	}
   8995 
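         	/*
         	 * A Kumeran read is started by writing the register offset
         	 * together with the REN (read enable) bit to KUMCTRLSTA; after
         	 * a short delay the data is read back from the same register.
         	 */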
   8996 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8997 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8998 	    KUMCTRLSTA_REN);
   8999 	CSR_WRITE_FLUSH(sc);
   9000 	delay(2);
   9001 
   9002 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9003 
   9004 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9005 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9006 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9007 		wm_put_swfwhw_semaphore(sc);
   9008 
   9009 	return rv;
   9010 }
   9011 
   9012 /*
   9013  * wm_kmrn_writereg:
   9014  *
    9015  *	Write a Kumeran register
   9016  */
   9017 static void
   9018 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9019 {
   9020 
   9021 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9022 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   9023 			aprint_error_dev(sc->sc_dev,
   9024 			    "%s: failed to get semaphore\n", __func__);
   9025 			return;
   9026 		}
   9027 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9028 		if (wm_get_swfwhw_semaphore(sc)) {
   9029 			aprint_error_dev(sc->sc_dev,
   9030 			    "%s: failed to get semaphore\n", __func__);
   9031 			return;
   9032 		}
   9033 	}
   9034 
   9035 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9036 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9037 	    (val & KUMCTRLSTA_MASK));
   9038 
   9039 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9040 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9041 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9042 		wm_put_swfwhw_semaphore(sc);
   9043 }
   9044 
   9045 /* SGMII related */
   9046 
   9047 /*
   9048  * wm_sgmii_uses_mdio
   9049  *
   9050  * Check whether the transaction is to the internal PHY or the external
   9051  * MDIO interface. Return true if it's MDIO.
   9052  */
   9053 static bool
   9054 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9055 {
   9056 	uint32_t reg;
   9057 	bool ismdio = false;
   9058 
   9059 	switch (sc->sc_type) {
   9060 	case WM_T_82575:
   9061 	case WM_T_82576:
   9062 		reg = CSR_READ(sc, WMREG_MDIC);
   9063 		ismdio = ((reg & MDIC_DEST) != 0);
   9064 		break;
   9065 	case WM_T_82580:
   9066 	case WM_T_I350:
   9067 	case WM_T_I354:
   9068 	case WM_T_I210:
   9069 	case WM_T_I211:
   9070 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9071 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9072 		break;
   9073 	default:
   9074 		break;
   9075 	}
   9076 
   9077 	return ismdio;
   9078 }
   9079 
   9080 /*
   9081  * wm_sgmii_readreg:	[mii interface function]
   9082  *
   9083  *	Read a PHY register on the SGMII
   9084  * This could be handled by the PHY layer if we didn't have to lock the
    9085  * resource ...
   9086  */
   9087 static int
   9088 wm_sgmii_readreg(device_t self, int phy, int reg)
   9089 {
   9090 	struct wm_softc *sc = device_private(self);
   9091 	uint32_t i2ccmd;
   9092 	int i, rv;
   9093 
   9094 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9095 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9096 		    __func__);
   9097 		return 0;
   9098 	}
   9099 
   9100 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9101 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9102 	    | I2CCMD_OPCODE_READ;
   9103 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9104 
   9105 	/* Poll the ready bit */
   9106 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9107 		delay(50);
   9108 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9109 		if (i2ccmd & I2CCMD_READY)
   9110 			break;
   9111 	}
   9112 	if ((i2ccmd & I2CCMD_READY) == 0)
   9113 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9114 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9115 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9116 
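         	/* The data bytes come back in I2C order; swap to host order. */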
   9117 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9118 
   9119 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9120 	return rv;
   9121 }
   9122 
   9123 /*
   9124  * wm_sgmii_writereg:	[mii interface function]
   9125  *
   9126  *	Write a PHY register on the SGMII.
   9127  * This could be handled by the PHY layer if we didn't have to lock the
    9128  * resource ...
   9129  */
   9130 static void
   9131 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9132 {
   9133 	struct wm_softc *sc = device_private(self);
   9134 	uint32_t i2ccmd;
   9135 	int i;
   9136 	int val_swapped;
   9137 
   9138 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9139 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9140 		    __func__);
   9141 		return;
   9142 	}
   9143 	/* Swap the data bytes for the I2C interface */
   9144 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9145 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9146 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9147 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9148 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9149 
   9150 	/* Poll the ready bit */
   9151 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9152 		delay(50);
   9153 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9154 		if (i2ccmd & I2CCMD_READY)
   9155 			break;
   9156 	}
   9157 	if ((i2ccmd & I2CCMD_READY) == 0)
   9158 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9159 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9160 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9161 
    9162 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9163 }
   9164 
   9165 /* TBI related */
   9166 
   9167 /*
   9168  * wm_tbi_mediainit:
   9169  *
   9170  *	Initialize media for use on 1000BASE-X devices.
   9171  */
   9172 static void
   9173 wm_tbi_mediainit(struct wm_softc *sc)
   9174 {
   9175 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9176 	const char *sep = "";
   9177 
   9178 	if (sc->sc_type < WM_T_82543)
   9179 		sc->sc_tipg = TIPG_WM_DFLT;
   9180 	else
   9181 		sc->sc_tipg = TIPG_LG_DFLT;
   9182 
   9183 	sc->sc_tbi_serdes_anegticks = 5;
   9184 
   9185 	/* Initialize our media structures */
   9186 	sc->sc_mii.mii_ifp = ifp;
   9187 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9188 
   9189 	if ((sc->sc_type >= WM_T_82575)
   9190 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9191 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9192 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9193 	else
   9194 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9195 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9196 
   9197 	/*
   9198 	 * SWD Pins:
   9199 	 *
   9200 	 *	0 = Link LED (output)
   9201 	 *	1 = Loss Of Signal (input)
   9202 	 */
   9203 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9204 
   9205 	/* XXX Perhaps this is only for TBI */
   9206 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9207 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9208 
   9209 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9210 		sc->sc_ctrl &= ~CTRL_LRST;
   9211 
   9212 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9213 
   9214 #define	ADD(ss, mm, dd)							\
   9215 do {									\
   9216 	aprint_normal("%s%s", sep, ss);					\
   9217 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9218 	sep = ", ";							\
   9219 } while (/*CONSTCOND*/0)
   9220 
   9221 	aprint_normal_dev(sc->sc_dev, "");
   9222 
   9223 	/* Only 82545 is LX */
   9224 	if (sc->sc_type == WM_T_82545) {
   9225 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9226 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9227 	} else {
   9228 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9229 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9230 	}
   9231 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9232 	aprint_normal("\n");
   9233 
   9234 #undef ADD
   9235 
   9236 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9237 }
   9238 
   9239 /*
   9240  * wm_tbi_mediachange:	[ifmedia interface function]
   9241  *
   9242  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9243  */
   9244 static int
   9245 wm_tbi_mediachange(struct ifnet *ifp)
   9246 {
   9247 	struct wm_softc *sc = ifp->if_softc;
   9248 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9249 	uint32_t status;
   9250 	int i;
   9251 
   9252 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9253 		/* XXX need some work for >= 82571 and < 82575 */
   9254 		if (sc->sc_type < WM_T_82575)
   9255 			return 0;
   9256 	}
   9257 
   9258 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9259 	    || (sc->sc_type >= WM_T_82575))
   9260 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9261 
   9262 	sc->sc_ctrl &= ~CTRL_LRST;
   9263 	sc->sc_txcw = TXCW_ANE;
   9264 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9265 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9266 	else if (ife->ifm_media & IFM_FDX)
   9267 		sc->sc_txcw |= TXCW_FD;
   9268 	else
   9269 		sc->sc_txcw |= TXCW_HD;
   9270 
   9271 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9272 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9273 
   9274 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9275 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9276 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9277 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9278 	CSR_WRITE_FLUSH(sc);
   9279 	delay(1000);
   9280 
   9281 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9282 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9283 
   9284 	/*
   9285 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   9286 	 * optics detect a signal, 0 if they don't.
   9287 	 */
   9288 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9289 		/* Have signal; wait for the link to come up. */
   9290 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9291 			delay(10000);
   9292 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9293 				break;
   9294 		}
   9295 
   9296 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9297 			    device_xname(sc->sc_dev),i));
   9298 
   9299 		status = CSR_READ(sc, WMREG_STATUS);
   9300 		DPRINTF(WM_DEBUG_LINK,
   9301 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9302 			device_xname(sc->sc_dev),status, STATUS_LU));
   9303 		if (status & STATUS_LU) {
   9304 			/* Link is up. */
   9305 			DPRINTF(WM_DEBUG_LINK,
   9306 			    ("%s: LINK: set media -> link up %s\n",
   9307 			    device_xname(sc->sc_dev),
   9308 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9309 
   9310 			/*
    9311 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    9312 			 * automatically, so we must re-read it into sc_ctrl.
   9313 			 */
   9314 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9315 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9316 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9317 			if (status & STATUS_FD)
   9318 				sc->sc_tctl |=
   9319 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9320 			else
   9321 				sc->sc_tctl |=
   9322 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9323 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9324 				sc->sc_fcrtl |= FCRTL_XONE;
   9325 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9326 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9327 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9328 				      sc->sc_fcrtl);
   9329 			sc->sc_tbi_linkup = 1;
   9330 		} else {
   9331 			if (i == WM_LINKUP_TIMEOUT)
   9332 				wm_check_for_link(sc);
   9333 			/* Link is down. */
   9334 			DPRINTF(WM_DEBUG_LINK,
   9335 			    ("%s: LINK: set media -> link down\n",
   9336 			    device_xname(sc->sc_dev)));
   9337 			sc->sc_tbi_linkup = 0;
   9338 		}
   9339 	} else {
   9340 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9341 		    device_xname(sc->sc_dev)));
   9342 		sc->sc_tbi_linkup = 0;
   9343 	}
   9344 
   9345 	wm_tbi_serdes_set_linkled(sc);
   9346 
   9347 	return 0;
   9348 }
   9349 
   9350 /*
   9351  * wm_tbi_mediastatus:	[ifmedia interface function]
   9352  *
   9353  *	Get the current interface media status on a 1000BASE-X device.
   9354  */
   9355 static void
   9356 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9357 {
   9358 	struct wm_softc *sc = ifp->if_softc;
   9359 	uint32_t ctrl, status;
   9360 
   9361 	ifmr->ifm_status = IFM_AVALID;
   9362 	ifmr->ifm_active = IFM_ETHER;
   9363 
   9364 	status = CSR_READ(sc, WMREG_STATUS);
   9365 	if ((status & STATUS_LU) == 0) {
   9366 		ifmr->ifm_active |= IFM_NONE;
   9367 		return;
   9368 	}
   9369 
   9370 	ifmr->ifm_status |= IFM_ACTIVE;
   9371 	/* Only 82545 is LX */
   9372 	if (sc->sc_type == WM_T_82545)
   9373 		ifmr->ifm_active |= IFM_1000_LX;
   9374 	else
   9375 		ifmr->ifm_active |= IFM_1000_SX;
   9376 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9377 		ifmr->ifm_active |= IFM_FDX;
   9378 	else
   9379 		ifmr->ifm_active |= IFM_HDX;
   9380 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9381 	if (ctrl & CTRL_RFCE)
   9382 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9383 	if (ctrl & CTRL_TFCE)
   9384 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9385 }
   9386 
   9387 /* XXX TBI only */
   9388 static int
   9389 wm_check_for_link(struct wm_softc *sc)
   9390 {
   9391 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9392 	uint32_t rxcw;
   9393 	uint32_t ctrl;
   9394 	uint32_t status;
   9395 	uint32_t sig;
   9396 
   9397 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9398 		/* XXX need some work for >= 82571 */
   9399 		if (sc->sc_type >= WM_T_82571) {
   9400 			sc->sc_tbi_linkup = 1;
   9401 			return 0;
   9402 		}
   9403 	}
   9404 
   9405 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9406 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9407 	status = CSR_READ(sc, WMREG_STATUS);
   9408 
   9409 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9410 
   9411 	DPRINTF(WM_DEBUG_LINK,
   9412 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9413 		device_xname(sc->sc_dev), __func__,
   9414 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9415 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9416 
   9417 	/*
   9418 	 * SWDPIN   LU RXCW
   9419 	 *      0    0    0
   9420 	 *      0    0    1	(should not happen)
   9421 	 *      0    1    0	(should not happen)
   9422 	 *      0    1    1	(should not happen)
   9423 	 *      1    0    0	Disable autonego and force linkup
   9424 	 *      1    0    1	got /C/ but not linkup yet
   9425 	 *      1    1    0	(linkup)
   9426 	 *      1    1    1	If IFM_AUTO, back to autonego
   9427 	 *
   9428 	 */
   9429 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9430 	    && ((status & STATUS_LU) == 0)
   9431 	    && ((rxcw & RXCW_C) == 0)) {
   9432 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9433 			__func__));
   9434 		sc->sc_tbi_linkup = 0;
   9435 		/* Disable auto-negotiation in the TXCW register */
   9436 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9437 
   9438 		/*
   9439 		 * Force link-up and also force full-duplex.
   9440 		 *
    9441 		 * NOTE: The hardware updated TFCE and RFCE in CTRL
    9442 		 * automatically, so we must update sc->sc_ctrl to match.
   9443 		 */
   9444 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9445 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9446 	} else if (((status & STATUS_LU) != 0)
   9447 	    && ((rxcw & RXCW_C) != 0)
   9448 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9449 		sc->sc_tbi_linkup = 1;
   9450 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9451 			__func__));
   9452 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9453 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9454 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9455 	    && ((rxcw & RXCW_C) != 0)) {
   9456 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9457 	} else {
   9458 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9459 			status));
   9460 	}
   9461 
   9462 	return 0;
   9463 }
   9464 
   9465 /*
   9466  * wm_tbi_tick:
   9467  *
   9468  *	Check the link on TBI devices.
   9469  *	This function acts as mii_tick().
   9470  */
   9471 static void
   9472 wm_tbi_tick(struct wm_softc *sc)
   9473 {
   9474 	struct mii_data *mii = &sc->sc_mii;
   9475 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9476 	uint32_t status;
   9477 
   9478 	KASSERT(WM_CORE_LOCKED(sc));
   9479 
   9480 	status = CSR_READ(sc, WMREG_STATUS);
   9481 
   9482 	/* XXX is this needed? */
   9483 	(void)CSR_READ(sc, WMREG_RXCW);
   9484 	(void)CSR_READ(sc, WMREG_CTRL);
   9485 
   9486 	/* set link status */
   9487 	if ((status & STATUS_LU) == 0) {
   9488 		DPRINTF(WM_DEBUG_LINK,
   9489 		    ("%s: LINK: checklink -> down\n",
   9490 			device_xname(sc->sc_dev)));
   9491 		sc->sc_tbi_linkup = 0;
   9492 	} else if (sc->sc_tbi_linkup == 0) {
   9493 		DPRINTF(WM_DEBUG_LINK,
   9494 		    ("%s: LINK: checklink -> up %s\n",
   9495 			device_xname(sc->sc_dev),
   9496 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9497 		sc->sc_tbi_linkup = 1;
   9498 		sc->sc_tbi_serdes_ticks = 0;
   9499 	}
   9500 
   9501 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9502 		goto setled;
   9503 
   9504 	if ((status & STATUS_LU) == 0) {
   9505 		sc->sc_tbi_linkup = 0;
   9506 		/* If the timer expired, retry autonegotiation */
   9507 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9508 		    && (++sc->sc_tbi_serdes_ticks
   9509 			>= sc->sc_tbi_serdes_anegticks)) {
   9510 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9511 			sc->sc_tbi_serdes_ticks = 0;
   9512 			/*
   9513 			 * Reset the link, and let autonegotiation do
   9514 			 * its thing
   9515 			 */
   9516 			sc->sc_ctrl |= CTRL_LRST;
   9517 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9518 			CSR_WRITE_FLUSH(sc);
   9519 			delay(1000);
   9520 			sc->sc_ctrl &= ~CTRL_LRST;
   9521 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9522 			CSR_WRITE_FLUSH(sc);
   9523 			delay(1000);
   9524 			CSR_WRITE(sc, WMREG_TXCW,
   9525 			    sc->sc_txcw & ~TXCW_ANE);
   9526 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9527 		}
   9528 	}
   9529 
   9530 setled:
   9531 	wm_tbi_serdes_set_linkled(sc);
   9532 }
   9533 
   9534 /* SERDES related */
   9535 static void
   9536 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9537 {
   9538 	uint32_t reg;
   9539 
   9540 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9541 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9542 		return;
   9543 
   9544 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9545 	reg |= PCS_CFG_PCS_EN;
   9546 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9547 
   9548 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9549 	reg &= ~CTRL_EXT_SWDPIN(3);
   9550 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9551 	CSR_WRITE_FLUSH(sc);
   9552 }
   9553 
   9554 static int
   9555 wm_serdes_mediachange(struct ifnet *ifp)
   9556 {
   9557 	struct wm_softc *sc = ifp->if_softc;
   9558 	bool pcs_autoneg = true; /* XXX */
   9559 	uint32_t ctrl_ext, pcs_lctl, reg;
   9560 
   9561 	/* XXX Currently, this function is not called on 8257[12] */
   9562 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9563 	    || (sc->sc_type >= WM_T_82575))
   9564 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9565 
   9566 	wm_serdes_power_up_link_82575(sc);
   9567 
   9568 	sc->sc_ctrl |= CTRL_SLU;
   9569 
   9570 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9571 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9572 
   9573 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9574 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9575 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9576 	case CTRL_EXT_LINK_MODE_SGMII:
   9577 		pcs_autoneg = true;
   9578 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9579 		break;
   9580 	case CTRL_EXT_LINK_MODE_1000KX:
   9581 		pcs_autoneg = false;
   9582 		/* FALLTHROUGH */
   9583 	default:
   9584 		if ((sc->sc_type == WM_T_82575)
   9585 		    || (sc->sc_type == WM_T_82576)) {
   9586 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9587 				pcs_autoneg = false;
   9588 		}
   9589 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9590 		    | CTRL_FRCFDX;
   9591 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9592 	}
   9593 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9594 
   9595 	if (pcs_autoneg) {
   9596 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9597 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9598 
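         		/*
         		 * Advertise both symmetric and asymmetric PAUSE in the
         		 * PCS autonegotiation advertisement register.
         		 */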
   9599 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9600 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9601 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9602 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9603 	} else
   9604 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9605 
   9606 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    9607 
   9609 	return 0;
   9610 }
   9611 
   9612 static void
   9613 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9614 {
   9615 	struct wm_softc *sc = ifp->if_softc;
   9616 	struct mii_data *mii = &sc->sc_mii;
   9617 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9618 	uint32_t pcs_adv, pcs_lpab, reg;
   9619 
   9620 	ifmr->ifm_status = IFM_AVALID;
   9621 	ifmr->ifm_active = IFM_ETHER;
   9622 
   9623 	/* Check PCS */
   9624 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9625 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9626 		ifmr->ifm_active |= IFM_NONE;
   9627 		sc->sc_tbi_linkup = 0;
   9628 		goto setled;
   9629 	}
   9630 
   9631 	sc->sc_tbi_linkup = 1;
   9632 	ifmr->ifm_status |= IFM_ACTIVE;
   9633 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9634 	if ((reg & PCS_LSTS_FDX) != 0)
   9635 		ifmr->ifm_active |= IFM_FDX;
   9636 	else
   9637 		ifmr->ifm_active |= IFM_HDX;
   9638 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9639 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9640 		/* Check flow */
   9641 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9642 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9643 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9644 			goto setled;
   9645 		}
   9646 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9647 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9648 		DPRINTF(WM_DEBUG_LINK,
   9649 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9650 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9651 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9652 			mii->mii_media_active |= IFM_FLOW
   9653 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9654 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9655 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9656 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9657 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9658 			mii->mii_media_active |= IFM_FLOW
   9659 			    | IFM_ETH_TXPAUSE;
   9660 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9661 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9662 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9663 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9664 			mii->mii_media_active |= IFM_FLOW
   9665 			    | IFM_ETH_RXPAUSE;
   9666 		} else {
   9667 		}
   9668 	}
   9669 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9670 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9671 setled:
   9672 	wm_tbi_serdes_set_linkled(sc);
   9673 }
   9674 
   9675 /*
   9676  * wm_serdes_tick:
   9677  *
   9678  *	Check the link on serdes devices.
   9679  */
   9680 static void
   9681 wm_serdes_tick(struct wm_softc *sc)
   9682 {
   9683 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9684 	struct mii_data *mii = &sc->sc_mii;
   9685 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9686 	uint32_t reg;
   9687 
   9688 	KASSERT(WM_CORE_LOCKED(sc));
   9689 
   9690 	mii->mii_media_status = IFM_AVALID;
   9691 	mii->mii_media_active = IFM_ETHER;
   9692 
   9693 	/* Check PCS */
   9694 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9695 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9696 		mii->mii_media_status |= IFM_ACTIVE;
   9697 		sc->sc_tbi_linkup = 1;
   9698 		sc->sc_tbi_serdes_ticks = 0;
   9699 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9700 		if ((reg & PCS_LSTS_FDX) != 0)
   9701 			mii->mii_media_active |= IFM_FDX;
   9702 		else
   9703 			mii->mii_media_active |= IFM_HDX;
   9704 	} else {
   9705 		mii->mii_media_status |= IFM_NONE;
   9706 		sc->sc_tbi_linkup = 0;
    9707 		/* If the timer expired, retry autonegotiation */
   9708 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9709 		    && (++sc->sc_tbi_serdes_ticks
   9710 			>= sc->sc_tbi_serdes_anegticks)) {
   9711 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9712 			sc->sc_tbi_serdes_ticks = 0;
   9713 			/* XXX */
   9714 			wm_serdes_mediachange(ifp);
   9715 		}
   9716 	}
   9717 
   9718 	wm_tbi_serdes_set_linkled(sc);
   9719 }
   9720 
   9721 /* SFP related */
   9722 
   9723 static int
   9724 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9725 {
   9726 	uint32_t i2ccmd;
   9727 	int i;
   9728 
   9729 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9730 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9731 
   9732 	/* Poll the ready bit */
   9733 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9734 		delay(50);
   9735 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9736 		if (i2ccmd & I2CCMD_READY)
   9737 			break;
   9738 	}
   9739 	if ((i2ccmd & I2CCMD_READY) == 0)
   9740 		return -1;
   9741 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9742 		return -1;
   9743 
   9744 	*data = i2ccmd & 0x00ff;
   9745 
   9746 	return 0;
   9747 }
   9748 
   9749 static uint32_t
   9750 wm_sfp_get_media_type(struct wm_softc *sc)
   9751 {
   9752 	uint32_t ctrl_ext;
   9753 	uint8_t val = 0;
   9754 	int timeout = 3;
   9755 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9756 	int rv = -1;
   9757 
   9758 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9759 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9760 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9761 	CSR_WRITE_FLUSH(sc);
   9762 
   9763 	/* Read SFP module data */
   9764 	while (timeout) {
   9765 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9766 		if (rv == 0)
   9767 			break;
   9768 		delay(100*1000); /* XXX too big */
   9769 		timeout--;
   9770 	}
   9771 	if (rv != 0)
   9772 		goto out;
   9773 	switch (val) {
   9774 	case SFF_SFP_ID_SFF:
   9775 		aprint_normal_dev(sc->sc_dev,
   9776 		    "Module/Connector soldered to board\n");
   9777 		break;
   9778 	case SFF_SFP_ID_SFP:
   9779 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9780 		break;
   9781 	case SFF_SFP_ID_UNKNOWN:
   9782 		goto out;
   9783 	default:
   9784 		break;
   9785 	}
   9786 
   9787 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9788 	if (rv != 0) {
   9789 		goto out;
   9790 	}
   9791 
   9792 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9793 		mediatype = WM_MEDIATYPE_SERDES;
    9794 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   9795 		sc->sc_flags |= WM_F_SGMII;
   9796 		mediatype = WM_MEDIATYPE_COPPER;
    9797 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   9798 		sc->sc_flags |= WM_F_SGMII;
   9799 		mediatype = WM_MEDIATYPE_SERDES;
   9800 	}
   9801 
   9802 out:
   9803 	/* Restore I2C interface setting */
   9804 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9805 
   9806 	return mediatype;
   9807 }
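
/*
 * Illustrative sketch (not part of the driver): the two bytes read above
 * are the SFF-8472 identifier and Ethernet compliance-code bytes of the
 * module EEPROM.  A hedged standalone decoder for the same two fields;
 * the raw bit values are assumptions taken from SFF-8472, not from this
 * driver's SFF_SFP_* definitions.
 */
#if 0
#include <stdint.h>

static const char *
sfp_media_name(uint8_t id, uint8_t eth)
{
	if (id != 0x03)		/* 0x03 == SFP/SFP+ identifier */
		return "unknown module";
	if (eth & 0x03)		/* bit 0: 1000BASE-SX, bit 1: 1000BASE-LX */
		return "serdes (1000BASE-SX/LX)";
	if (eth & 0x08)		/* bit 3: 1000BASE-T, i.e. SGMII copper */
		return "copper (1000BASE-T)";
	return "unrecognized media";
}
#endif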
   9808 /*
   9809  * NVM related.
   9810  * Microwire, SPI (w/wo EERD) and Flash.
   9811  */
   9812 
    9813 /* Both SPI and Microwire */
   9814 
   9815 /*
   9816  * wm_eeprom_sendbits:
   9817  *
   9818  *	Send a series of bits to the EEPROM.
   9819  */
   9820 static void
   9821 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9822 {
   9823 	uint32_t reg;
   9824 	int x;
   9825 
   9826 	reg = CSR_READ(sc, WMREG_EECD);
   9827 
   9828 	for (x = nbits; x > 0; x--) {
   9829 		if (bits & (1U << (x - 1)))
   9830 			reg |= EECD_DI;
   9831 		else
   9832 			reg &= ~EECD_DI;
   9833 		CSR_WRITE(sc, WMREG_EECD, reg);
   9834 		CSR_WRITE_FLUSH(sc);
   9835 		delay(2);
   9836 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9837 		CSR_WRITE_FLUSH(sc);
   9838 		delay(2);
   9839 		CSR_WRITE(sc, WMREG_EECD, reg);
   9840 		CSR_WRITE_FLUSH(sc);
   9841 		delay(2);
   9842 	}
   9843 }
   9844 
   9845 /*
   9846  * wm_eeprom_recvbits:
   9847  *
   9848  *	Receive a series of bits from the EEPROM.
   9849  */
   9850 static void
   9851 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9852 {
   9853 	uint32_t reg, val;
   9854 	int x;
   9855 
   9856 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9857 
   9858 	val = 0;
   9859 	for (x = nbits; x > 0; x--) {
   9860 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9861 		CSR_WRITE_FLUSH(sc);
   9862 		delay(2);
   9863 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9864 			val |= (1U << (x - 1));
   9865 		CSR_WRITE(sc, WMREG_EECD, reg);
   9866 		CSR_WRITE_FLUSH(sc);
   9867 		delay(2);
   9868 	}
   9869 	*valp = val;
   9870 }
   9871 
   9872 /* Microwire */
   9873 
   9874 /*
   9875  * wm_nvm_read_uwire:
   9876  *
   9877  *	Read a word from the EEPROM using the MicroWire protocol.
   9878  */
   9879 static int
   9880 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9881 {
   9882 	uint32_t reg, val;
   9883 	int i;
   9884 
   9885 	for (i = 0; i < wordcnt; i++) {
   9886 		/* Clear SK and DI. */
   9887 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9888 		CSR_WRITE(sc, WMREG_EECD, reg);
   9889 
   9890 		/*
   9891 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9892 		 * and Xen.
   9893 		 *
    9894 		 * We use this workaround only for the 82540 because
    9895 		 * qemu's e1000 acts as an 82540.
   9896 		 */
   9897 		if (sc->sc_type == WM_T_82540) {
   9898 			reg |= EECD_SK;
   9899 			CSR_WRITE(sc, WMREG_EECD, reg);
   9900 			reg &= ~EECD_SK;
   9901 			CSR_WRITE(sc, WMREG_EECD, reg);
   9902 			CSR_WRITE_FLUSH(sc);
   9903 			delay(2);
   9904 		}
   9905 		/* XXX: end of workaround */
   9906 
   9907 		/* Set CHIP SELECT. */
   9908 		reg |= EECD_CS;
   9909 		CSR_WRITE(sc, WMREG_EECD, reg);
   9910 		CSR_WRITE_FLUSH(sc);
   9911 		delay(2);
   9912 
   9913 		/* Shift in the READ command. */
   9914 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9915 
   9916 		/* Shift in address. */
   9917 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9918 
   9919 		/* Shift out the data. */
   9920 		wm_eeprom_recvbits(sc, &val, 16);
   9921 		data[i] = val & 0xffff;
   9922 
   9923 		/* Clear CHIP SELECT. */
   9924 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9925 		CSR_WRITE(sc, WMREG_EECD, reg);
   9926 		CSR_WRITE_FLUSH(sc);
   9927 		delay(2);
   9928 	}
   9929 
   9930 	return 0;
   9931 }
   9932 
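/*
 * Illustrative sketch: one Microwire READ clocks out a 3-bit opcode
 * followed by sc_nvm_addrbits of address, MSB first, then clocks 16 data
 * bits back in.  A hedged sketch of the frame wm_eeprom_sendbits() emits
 * above, assuming UWIRE_OPC_READ is the usual 3-bit 0b110 pattern:
 */
#if 0
#include <stdint.h>

static uint32_t
uwire_read_frame(uint16_t word, unsigned addrbits)
{
	/* e.g. word 0x10 with 6 address bits -> 0b110010000 */
	return (0x6u << addrbits) | (word & ((1u << addrbits) - 1));
}
#endif
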
   9933 /* SPI */
   9934 
   9935 /*
   9936  * Set SPI and FLASH related information from the EECD register.
   9937  * For 82541 and 82547, the word size is taken from EEPROM.
   9938  */
   9939 static int
   9940 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9941 {
   9942 	int size;
   9943 	uint32_t reg;
   9944 	uint16_t data;
   9945 
   9946 	reg = CSR_READ(sc, WMREG_EECD);
   9947 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9948 
   9949 	/* Read the size of NVM from EECD by default */
   9950 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9951 	switch (sc->sc_type) {
   9952 	case WM_T_82541:
   9953 	case WM_T_82541_2:
   9954 	case WM_T_82547:
   9955 	case WM_T_82547_2:
   9956 		/* Set dummy value to access EEPROM */
   9957 		sc->sc_nvm_wordsize = 64;
   9958 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9959 		reg = data;
   9960 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9961 		if (size == 0)
    9962 			size = 6; /* 64 words (1 << 6) */
   9963 		else
   9964 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9965 		break;
   9966 	case WM_T_80003:
   9967 	case WM_T_82571:
   9968 	case WM_T_82572:
   9969 	case WM_T_82573: /* SPI case */
   9970 	case WM_T_82574: /* SPI case */
   9971 	case WM_T_82583: /* SPI case */
   9972 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9973 		if (size > 14)
   9974 			size = 14;
   9975 		break;
   9976 	case WM_T_82575:
   9977 	case WM_T_82576:
   9978 	case WM_T_82580:
   9979 	case WM_T_I350:
   9980 	case WM_T_I354:
   9981 	case WM_T_I210:
   9982 	case WM_T_I211:
   9983 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9984 		if (size > 15)
   9985 			size = 15;
   9986 		break;
   9987 	default:
   9988 		aprint_error_dev(sc->sc_dev,
   9989 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   9990 		return -1;
   9991 		break;
   9992 	}
   9993 
   9994 	sc->sc_nvm_wordsize = 1 << size;
   9995 
   9996 	return 0;
   9997 }
   9998 
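/*
 * Illustrative sketch: on the EECD-derived path above, the size field is
 * a power-of-two exponent biased by NVM_WORD_SIZE_BASE_SHIFT (6 here),
 * so field value 0 means 1 << 6 = 64 words and, e.g., field value 2 on
 * an 82571 means 1 << 8 = 256 words.  A hedged sketch:
 */
#if 0
static unsigned
nvm_wordsize_from_field(unsigned field, unsigned clamp)
{
	unsigned size = field + 6;	/* NVM_WORD_SIZE_BASE_SHIFT */

	if (size > clamp)		/* 14 or 15 above, by chip family */
		size = clamp;
	return 1u << size;		/* word count */
}
#endif
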
   9999 /*
   10000  * wm_nvm_ready_spi:
   10001  *
   10002  *	Wait for a SPI EEPROM to be ready for commands.
   10003  */
   10004 static int
   10005 wm_nvm_ready_spi(struct wm_softc *sc)
   10006 {
   10007 	uint32_t val;
   10008 	int usec;
   10009 
   10010 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10011 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10012 		wm_eeprom_recvbits(sc, &val, 8);
   10013 		if ((val & SPI_SR_RDY) == 0)
   10014 			break;
   10015 	}
   10016 	if (usec >= SPI_MAX_RETRIES) {
   10017 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   10018 		return 1;
   10019 	}
   10020 	return 0;
   10021 }
   10022 
   10023 /*
   10024  * wm_nvm_read_spi:
   10025  *
    10026  *	Read a word from the EEPROM using the SPI protocol.
   10027  */
   10028 static int
   10029 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10030 {
   10031 	uint32_t reg, val;
   10032 	int i;
   10033 	uint8_t opc;
   10034 
   10035 	/* Clear SK and CS. */
   10036 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10037 	CSR_WRITE(sc, WMREG_EECD, reg);
   10038 	CSR_WRITE_FLUSH(sc);
   10039 	delay(2);
   10040 
   10041 	if (wm_nvm_ready_spi(sc))
   10042 		return 1;
   10043 
   10044 	/* Toggle CS to flush commands. */
   10045 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10046 	CSR_WRITE_FLUSH(sc);
   10047 	delay(2);
   10048 	CSR_WRITE(sc, WMREG_EECD, reg);
   10049 	CSR_WRITE_FLUSH(sc);
   10050 	delay(2);
   10051 
   10052 	opc = SPI_OPC_READ;
   10053 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10054 		opc |= SPI_OPC_A8;
   10055 
   10056 	wm_eeprom_sendbits(sc, opc, 8);
   10057 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10058 
   10059 	for (i = 0; i < wordcnt; i++) {
   10060 		wm_eeprom_recvbits(sc, &val, 16);
   10061 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10062 	}
   10063 
   10064 	/* Raise CS and clear SK. */
   10065 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10066 	CSR_WRITE(sc, WMREG_EECD, reg);
   10067 	CSR_WRITE_FLUSH(sc);
   10068 	delay(2);
   10069 
   10070 	return 0;
   10071 }
   10072 
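/*
 * Illustrative sketch: on SPI parts with only 8 address bits, word 128
 * sits at byte address 256, one bit beyond what the address byte can
 * carry; that ninth bit travels in the opcode as SPI_OPC_A8, which is
 * why the code above ORs it in for word >= 128.  Hedged check:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void
spi_a8_rule(void)
{
	uint16_t word = 128;

	/* Words are two bytes, so the byte address is word << 1. */
	assert(((uint32_t)word << 1) == 256);	/* needs the A8 opcode bit */
}
#endif
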
    10073 /* Using the EERD register */
   10074 
   10075 static int
   10076 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10077 {
   10078 	uint32_t attempts = 100000;
   10079 	uint32_t i, reg = 0;
   10080 	int32_t done = -1;
   10081 
   10082 	for (i = 0; i < attempts; i++) {
   10083 		reg = CSR_READ(sc, rw);
   10084 
   10085 		if (reg & EERD_DONE) {
   10086 			done = 0;
   10087 			break;
   10088 		}
   10089 		delay(5);
   10090 	}
   10091 
   10092 	return done;
   10093 }
   10094 
   10095 static int
   10096 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10097     uint16_t *data)
   10098 {
   10099 	int i, eerd = 0;
   10100 	int error = 0;
   10101 
   10102 	for (i = 0; i < wordcnt; i++) {
   10103 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10104 
   10105 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10106 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10107 		if (error != 0)
   10108 			break;
   10109 
   10110 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10111 	}
   10112 
   10113 	return error;
   10114 }
   10115 
   10116 /* Flash */
   10117 
   10118 static int
   10119 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10120 {
   10121 	uint32_t eecd;
   10122 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10123 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10124 	uint8_t sig_byte = 0;
   10125 
   10126 	switch (sc->sc_type) {
   10127 	case WM_T_PCH_SPT:
   10128 		/*
   10129 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10130 		 * sector valid bits from the NVM.
   10131 		 */
   10132 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10133 		if ((*bank == 0) || (*bank == 1)) {
   10134 			aprint_error_dev(sc->sc_dev,
   10135 					 "%s: no valid NVM bank present\n",
   10136 				__func__);
   10137 			return -1;
   10138 		} else {
   10139 			*bank = *bank - 2;
   10140 			return 0;
   10141 		}
   10142 	case WM_T_ICH8:
   10143 	case WM_T_ICH9:
   10144 		eecd = CSR_READ(sc, WMREG_EECD);
   10145 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10146 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10147 			return 0;
   10148 		}
   10149 		/* FALLTHROUGH */
   10150 	default:
   10151 		/* Default to 0 */
   10152 		*bank = 0;
   10153 
   10154 		/* Check bank 0 */
   10155 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10156 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10157 			*bank = 0;
   10158 			return 0;
   10159 		}
   10160 
   10161 		/* Check bank 1 */
   10162 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10163 		    &sig_byte);
   10164 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10165 			*bank = 1;
   10166 			return 0;
   10167 		}
   10168 	}
   10169 
   10170 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10171 		device_xname(sc->sc_dev)));
   10172 	return -1;
   10173 }
   10174 
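/*
 * Illustrative sketch: in the default case above, the probed signature
 * is the high byte of word ICH_NVM_SIG_WORD within each bank, hence the
 * "* 2 + 1" byte offset, and bank 1 starts one full bank (bank-size
 * words, twice that in bytes) further in.  Hedged offset arithmetic:
 */
#if 0
#include <stdint.h>

static uint32_t
ich8_sig_byte_offset(unsigned bank, uint32_t sig_word, uint32_t bank_words)
{
	return (sig_word * 2 + 1) + bank * (bank_words * 2);
}
#endif
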
   10175 /******************************************************************************
   10176  * This function does initial flash setup so that a new read/write/erase cycle
   10177  * can be started.
   10178  *
   10179  * sc - The pointer to the hw structure
   10180  ****************************************************************************/
   10181 static int32_t
   10182 wm_ich8_cycle_init(struct wm_softc *sc)
   10183 {
   10184 	uint16_t hsfsts;
   10185 	int32_t error = 1;
   10186 	int32_t i     = 0;
   10187 
   10188 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10189 
    10190 	/* Maybe check the Flash Descriptor Valid bit in Hw status */
   10191 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10192 		return error;
   10193 	}
   10194 
   10195 	/* Clear FCERR in Hw status by writing 1 */
   10196 	/* Clear DAEL in Hw status by writing a 1 */
   10197 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10198 
   10199 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10200 
    10201 	/*
    10202 	 * Either we should have a hardware SPI cycle-in-progress bit to
    10203 	 * check against in order to start a new cycle, or the FDONE bit
    10204 	 * should be changed in the hardware so that it is 1 after a
    10205 	 * hardware reset, which could then indicate whether a cycle is
    10206 	 * in progress or has completed.  We should also have a software
    10207 	 * semaphore guarding FDONE or the cycle-in-progress bit so that
    10208 	 * two threads' accesses are serialized, or some other way to
    10209 	 * keep two threads from starting a cycle at the same time.
    10210 	 */
   10211 
   10212 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10213 		/*
   10214 		 * There is no cycle running at present, so we can start a
   10215 		 * cycle
   10216 		 */
   10217 
   10218 		/* Begin by setting Flash Cycle Done. */
   10219 		hsfsts |= HSFSTS_DONE;
   10220 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10221 		error = 0;
   10222 	} else {
   10223 		/*
    10224 		 * Otherwise poll for some time so the current cycle has a
    10225 		 * chance to end before giving up.
   10226 		 */
   10227 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10228 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10229 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10230 				error = 0;
   10231 				break;
   10232 			}
   10233 			delay(1);
   10234 		}
   10235 		if (error == 0) {
   10236 			/*
    10237 			 * The previous cycle finished within the timeout,
    10238 			 * so now set the Flash Cycle Done bit.
   10239 			 */
   10240 			hsfsts |= HSFSTS_DONE;
   10241 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10242 		}
   10243 	}
   10244 	return error;
   10245 }
   10246 
   10247 /******************************************************************************
   10248  * This function starts a flash cycle and waits for its completion
   10249  *
   10250  * sc - The pointer to the hw structure
   10251  ****************************************************************************/
   10252 static int32_t
   10253 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10254 {
   10255 	uint16_t hsflctl;
   10256 	uint16_t hsfsts;
   10257 	int32_t error = 1;
   10258 	uint32_t i = 0;
   10259 
   10260 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10261 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10262 	hsflctl |= HSFCTL_GO;
   10263 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10264 
   10265 	/* Wait till FDONE bit is set to 1 */
   10266 	do {
   10267 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10268 		if (hsfsts & HSFSTS_DONE)
   10269 			break;
   10270 		delay(1);
   10271 		i++;
   10272 	} while (i < timeout);
    10273 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10274 		error = 0;
   10275 
   10276 	return error;
   10277 }
   10278 
   10279 /******************************************************************************
   10280  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10281  *
   10282  * sc - The pointer to the hw structure
   10283  * index - The index of the byte or word to read.
    10284  * size - Size of data to read: 1=byte, 2=word, 4=dword
   10285  * data - Pointer to the word to store the value read.
   10286  *****************************************************************************/
   10287 static int32_t
   10288 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10289     uint32_t size, uint32_t *data)
   10290 {
   10291 	uint16_t hsfsts;
   10292 	uint16_t hsflctl;
   10293 	uint32_t flash_linear_address;
   10294 	uint32_t flash_data = 0;
   10295 	int32_t error = 1;
   10296 	int32_t count = 0;
   10297 
    10298 	if (size < 1 || size > 4 || data == NULL ||
   10299 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10300 		return error;
   10301 
   10302 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10303 	    sc->sc_ich8_flash_base;
   10304 
   10305 	do {
   10306 		delay(1);
   10307 		/* Steps */
   10308 		error = wm_ich8_cycle_init(sc);
   10309 		if (error)
   10310 			break;
   10311 
   10312 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10313 		/* The byte count field holds (size - 1): 0/1/3 for 1/2/4 bytes. */
   10314 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10315 		    & HSFCTL_BCOUNT_MASK;
   10316 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10317 		if (sc->sc_type == WM_T_PCH_SPT) {
   10318 			/*
    10319 			 * In SPT, this register is in LAN memory space,
    10320 			 * not flash.  Only 32 bit access is supported.
   10321 			 */
   10322 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10323 			    (uint32_t)hsflctl);
   10324 		} else
   10325 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10326 
   10327 		/*
   10328 		 * Write the last 24 bits of index into Flash Linear address
   10329 		 * field in Flash Address
   10330 		 */
    10331 		/* TODO: maybe check the index against the size of the flash */
   10332 
   10333 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10334 
   10335 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10336 
   10337 		/*
    10338 		 * Check if FCERR is set to 1.  If it is, clear it and
    10339 		 * try the whole sequence a few more times; otherwise
    10340 		 * read in (shift in) Flash Data0, least significant
    10341 		 * byte first.
   10342 		 */
   10343 		if (error == 0) {
   10344 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10345 			if (size == 1)
   10346 				*data = (uint8_t)(flash_data & 0x000000FF);
   10347 			else if (size == 2)
   10348 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10349 			else if (size == 4)
   10350 				*data = (uint32_t)flash_data;
   10351 			break;
   10352 		} else {
   10353 			/*
   10354 			 * If we've gotten here, then things are probably
   10355 			 * completely hosed, but if the error condition is
   10356 			 * detected, it won't hurt to give it another try...
   10357 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10358 			 */
   10359 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10360 			if (hsfsts & HSFSTS_ERR) {
   10361 				/* Repeat for some time before giving up. */
   10362 				continue;
   10363 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10364 				break;
   10365 		}
   10366 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10367 
   10368 	return error;
   10369 }
   10370 
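/*
 * Illustrative sketch: the byte-count field programmed above holds
 * (size - 1), so 1-, 2- and 4-byte reads program 0, 1 and 3.  A hedged
 * sketch of composing the control word with the macros used above:
 */
#if 0
static uint16_t
hsfctl_for_read(uint32_t size)	/* size in bytes: 1, 2 or 4 */
{
	uint16_t ctl = 0;

	ctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
	ctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
	/* wm_ich8_flash_cycle() later sets HSFCTL_GO to kick the cycle. */
	return ctl;
}
#endif
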
   10371 /******************************************************************************
   10372  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10373  *
    10374  * sc - pointer to the wm_softc structure
   10375  * index - The index of the byte to read.
   10376  * data - Pointer to a byte to store the value read.
   10377  *****************************************************************************/
   10378 static int32_t
   10379 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10380 {
   10381 	int32_t status;
   10382 	uint32_t word = 0;
   10383 
   10384 	status = wm_read_ich8_data(sc, index, 1, &word);
   10385 	if (status == 0)
   10386 		*data = (uint8_t)word;
   10387 	else
   10388 		*data = 0;
   10389 
   10390 	return status;
   10391 }
   10392 
   10393 /******************************************************************************
   10394  * Reads a word from the NVM using the ICH8 flash access registers.
   10395  *
    10396  * sc - pointer to the wm_softc structure
   10397  * index - The starting byte index of the word to read.
   10398  * data - Pointer to a word to store the value read.
   10399  *****************************************************************************/
   10400 static int32_t
   10401 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10402 {
   10403 	int32_t status;
   10404 	uint32_t word = 0;
   10405 
   10406 	status = wm_read_ich8_data(sc, index, 2, &word);
   10407 	if (status == 0)
   10408 		*data = (uint16_t)word;
   10409 	else
   10410 		*data = 0;
   10411 
   10412 	return status;
   10413 }
   10414 
   10415 /******************************************************************************
   10416  * Reads a dword from the NVM using the ICH8 flash access registers.
   10417  *
    10418  * sc - pointer to the wm_softc structure
    10419  * index - The starting byte index of the dword to read.
    10420  * data - Pointer to a dword to store the value read.
   10421  *****************************************************************************/
   10422 static int32_t
   10423 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10424 {
   10425 	int32_t status;
   10426 
   10427 	status = wm_read_ich8_data(sc, index, 4, data);
   10428 	return status;
   10429 }
   10430 
   10431 /******************************************************************************
   10432  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10433  * register.
   10434  *
   10435  * sc - Struct containing variables accessed by shared code
   10436  * offset - offset of word in the EEPROM to read
   10437  * data - word read from the EEPROM
   10438  * words - number of words to read
   10439  *****************************************************************************/
   10440 static int
   10441 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10442 {
   10443 	int32_t  error = 0;
   10444 	uint32_t flash_bank = 0;
   10445 	uint32_t act_offset = 0;
   10446 	uint32_t bank_offset = 0;
   10447 	uint16_t word = 0;
   10448 	uint16_t i = 0;
   10449 
   10450 	/*
   10451 	 * We need to know which is the valid flash bank.  In the event
   10452 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10453 	 * managing flash_bank.  So it cannot be trusted and needs
   10454 	 * to be updated with each read.
   10455 	 */
   10456 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10457 	if (error) {
   10458 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10459 			device_xname(sc->sc_dev)));
   10460 		flash_bank = 0;
   10461 	}
   10462 
   10463 	/*
   10464 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10465 	 * size
   10466 	 */
   10467 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10468 
   10469 	error = wm_get_swfwhw_semaphore(sc);
   10470 	if (error) {
   10471 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10472 		    __func__);
   10473 		return error;
   10474 	}
   10475 
   10476 	for (i = 0; i < words; i++) {
   10477 		/* The NVM part needs a byte offset, hence * 2 */
   10478 		act_offset = bank_offset + ((offset + i) * 2);
   10479 		error = wm_read_ich8_word(sc, act_offset, &word);
   10480 		if (error) {
   10481 			aprint_error_dev(sc->sc_dev,
   10482 			    "%s: failed to read NVM\n", __func__);
   10483 			break;
   10484 		}
   10485 		data[i] = word;
   10486 	}
   10487 
   10488 	wm_put_swfwhw_semaphore(sc);
   10489 	return error;
   10490 }
   10491 
   10492 /******************************************************************************
   10493  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10494  * register.
   10495  *
   10496  * sc - Struct containing variables accessed by shared code
   10497  * offset - offset of word in the EEPROM to read
   10498  * data - word read from the EEPROM
   10499  * words - number of words to read
   10500  *****************************************************************************/
   10501 static int
   10502 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10503 {
   10504 	int32_t  error = 0;
   10505 	uint32_t flash_bank = 0;
   10506 	uint32_t act_offset = 0;
   10507 	uint32_t bank_offset = 0;
   10508 	uint32_t dword = 0;
   10509 	uint16_t i = 0;
   10510 
   10511 	/*
   10512 	 * We need to know which is the valid flash bank.  In the event
   10513 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10514 	 * managing flash_bank.  So it cannot be trusted and needs
   10515 	 * to be updated with each read.
   10516 	 */
   10517 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10518 	if (error) {
   10519 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10520 			device_xname(sc->sc_dev)));
   10521 		flash_bank = 0;
   10522 	}
   10523 
   10524 	/*
   10525 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10526 	 * size
   10527 	 */
   10528 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10529 
   10530 	error = wm_get_swfwhw_semaphore(sc);
   10531 	if (error) {
   10532 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10533 		    __func__);
   10534 		return error;
   10535 	}
   10536 
   10537 	for (i = 0; i < words; i++) {
   10538 		/* The NVM part needs a byte offset, hence * 2 */
   10539 		act_offset = bank_offset + ((offset + i) * 2);
   10540 		/* but we must read dword aligned, so mask ... */
   10541 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10542 		if (error) {
   10543 			aprint_error_dev(sc->sc_dev,
   10544 			    "%s: failed to read NVM\n", __func__);
   10545 			break;
   10546 		}
   10547 		/* ... and pick out low or high word */
   10548 		if ((act_offset & 0x2) == 0)
   10549 			data[i] = (uint16_t)(dword & 0xFFFF);
   10550 		else
   10551 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10552 	}
   10553 
   10554 	wm_put_swfwhw_semaphore(sc);
   10555 	return error;
   10556 }
   10557 
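/*
 * Illustrative sketch: SPT flash access is dword-only, so a 16-bit word
 * at byte offset off is fetched by reading the dword at off & ~3 and
 * taking the half selected by bit 1 of off, exactly as the loop above
 * does.  Hedged pure-function equivalent:
 */
#if 0
#include <stdint.h>

static uint16_t
spt_pick_word(uint32_t byte_off, uint32_t dword)
{
	return (byte_off & 0x2) ? (uint16_t)(dword >> 16)
	    : (uint16_t)(dword & 0xffff);
}
#endif
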
   10558 /* iNVM */
   10559 
   10560 static int
   10561 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10562 {
    10563 	int32_t rv = -1;	/* Return "not found" unless the word turns up */
   10564 	uint32_t invm_dword;
   10565 	uint16_t i;
   10566 	uint8_t record_type, word_address;
   10567 
   10568 	for (i = 0; i < INVM_SIZE; i++) {
   10569 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10570 		/* Get record type */
   10571 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10572 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10573 			break;
   10574 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10575 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10576 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10577 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10578 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10579 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10580 			if (word_address == address) {
   10581 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10582 				rv = 0;
   10583 				break;
   10584 			}
   10585 		}
   10586 	}
   10587 
   10588 	return rv;
   10589 }
   10590 
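/*
 * Illustrative sketch: each iNVM entry is a single 32-bit autoload
 * record and the INVM_DWORD_TO_* macros above are plain field
 * extractions.  A hedged standalone equivalent; the field positions are
 * assumptions based on the i210/i211 datasheet layout, not taken from
 * this driver's header:
 */
#if 0
#include <stdint.h>

static uint8_t  invm_record_type(uint32_t dw)  { return dw & 0x7; }
static uint8_t  invm_word_address(uint32_t dw) { return (dw >> 9) & 0x7f; }
static uint16_t invm_word_data(uint32_t dw)    { return dw >> 16; }
#endif
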
   10591 static int
   10592 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10593 {
   10594 	int rv = 0;
   10595 	int i;
   10596 
   10597 	for (i = 0; i < words; i++) {
   10598 		switch (offset + i) {
   10599 		case NVM_OFF_MACADDR:
   10600 		case NVM_OFF_MACADDR1:
   10601 		case NVM_OFF_MACADDR2:
   10602 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10603 			if (rv != 0) {
   10604 				data[i] = 0xffff;
   10605 				rv = -1;
   10606 			}
   10607 			break;
   10608 		case NVM_OFF_CFG2:
   10609 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10610 			if (rv != 0) {
   10611 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10612 				rv = 0;
   10613 			}
   10614 			break;
   10615 		case NVM_OFF_CFG4:
   10616 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10617 			if (rv != 0) {
   10618 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10619 				rv = 0;
   10620 			}
   10621 			break;
   10622 		case NVM_OFF_LED_1_CFG:
   10623 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10624 			if (rv != 0) {
   10625 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10626 				rv = 0;
   10627 			}
   10628 			break;
   10629 		case NVM_OFF_LED_0_2_CFG:
   10630 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10631 			if (rv != 0) {
   10632 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10633 				rv = 0;
   10634 			}
   10635 			break;
   10636 		case NVM_OFF_ID_LED_SETTINGS:
   10637 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10638 			if (rv != 0) {
   10639 				*data = ID_LED_RESERVED_FFFF;
   10640 				rv = 0;
   10641 			}
   10642 			break;
   10643 		default:
   10644 			DPRINTF(WM_DEBUG_NVM,
   10645 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10646 			*data = NVM_RESERVED_WORD;
   10647 			break;
   10648 		}
   10649 	}
   10650 
   10651 	return rv;
   10652 }
   10653 
    10654 /* Locking, NVM type detection, checksum validation, version check and read */
   10655 
   10656 /*
   10657  * wm_nvm_acquire:
   10658  *
   10659  *	Perform the EEPROM handshake required on some chips.
   10660  */
   10661 static int
   10662 wm_nvm_acquire(struct wm_softc *sc)
   10663 {
   10664 	uint32_t reg;
   10665 	int x;
   10666 	int ret = 0;
   10667 
    10668 	/* Always succeeds: flash needs no EEPROM handshake */
   10669 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10670 		return 0;
   10671 
   10672 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10673 		ret = wm_get_swfwhw_semaphore(sc);
   10674 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10675 		/* This will also do wm_get_swsm_semaphore() if needed */
   10676 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10677 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10678 		ret = wm_get_swsm_semaphore(sc);
   10679 	}
   10680 
   10681 	if (ret) {
   10682 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10683 			__func__);
   10684 		return 1;
   10685 	}
   10686 
   10687 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10688 		reg = CSR_READ(sc, WMREG_EECD);
   10689 
   10690 		/* Request EEPROM access. */
   10691 		reg |= EECD_EE_REQ;
   10692 		CSR_WRITE(sc, WMREG_EECD, reg);
   10693 
   10694 		/* ..and wait for it to be granted. */
   10695 		for (x = 0; x < 1000; x++) {
   10696 			reg = CSR_READ(sc, WMREG_EECD);
   10697 			if (reg & EECD_EE_GNT)
   10698 				break;
   10699 			delay(5);
   10700 		}
   10701 		if ((reg & EECD_EE_GNT) == 0) {
   10702 			aprint_error_dev(sc->sc_dev,
   10703 			    "could not acquire EEPROM GNT\n");
   10704 			reg &= ~EECD_EE_REQ;
   10705 			CSR_WRITE(sc, WMREG_EECD, reg);
   10706 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10707 				wm_put_swfwhw_semaphore(sc);
   10708 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10709 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10710 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10711 				wm_put_swsm_semaphore(sc);
   10712 			return 1;
   10713 		}
   10714 	}
   10715 
   10716 	return 0;
   10717 }
   10718 
   10719 /*
   10720  * wm_nvm_release:
   10721  *
   10722  *	Release the EEPROM mutex.
   10723  */
   10724 static void
   10725 wm_nvm_release(struct wm_softc *sc)
   10726 {
   10727 	uint32_t reg;
   10728 
    10729 	/* Nothing to release: flash needs no EEPROM handshake */
   10730 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10731 		return;
   10732 
   10733 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10734 		reg = CSR_READ(sc, WMREG_EECD);
   10735 		reg &= ~EECD_EE_REQ;
   10736 		CSR_WRITE(sc, WMREG_EECD, reg);
   10737 	}
   10738 
   10739 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10740 		wm_put_swfwhw_semaphore(sc);
   10741 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10742 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10743 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10744 		wm_put_swsm_semaphore(sc);
   10745 }
   10746 
   10747 static int
   10748 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10749 {
   10750 	uint32_t eecd = 0;
   10751 
   10752 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10753 	    || sc->sc_type == WM_T_82583) {
   10754 		eecd = CSR_READ(sc, WMREG_EECD);
   10755 
   10756 		/* Isolate bits 15 & 16 */
   10757 		eecd = ((eecd >> 15) & 0x03);
   10758 
   10759 		/* If both bits are set, device is Flash type */
   10760 		if (eecd == 0x03)
   10761 			return 0;
   10762 	}
   10763 	return 1;
   10764 }
   10765 
   10766 static int
   10767 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10768 {
   10769 	uint32_t eec;
   10770 
   10771 	eec = CSR_READ(sc, WMREG_EEC);
   10772 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10773 		return 1;
   10774 
   10775 	return 0;
   10776 }
   10777 
   10778 /*
   10779  * wm_nvm_validate_checksum
   10780  *
   10781  * The checksum is defined as the sum of the first 64 (16 bit) words.
   10782  */
   10783 static int
   10784 wm_nvm_validate_checksum(struct wm_softc *sc)
   10785 {
   10786 	uint16_t checksum;
   10787 	uint16_t eeprom_data;
   10788 #ifdef WM_DEBUG
   10789 	uint16_t csum_wordaddr, valid_checksum;
   10790 #endif
   10791 	int i;
   10792 
   10793 	checksum = 0;
   10794 
   10795 	/* Don't check for I211 */
   10796 	if (sc->sc_type == WM_T_I211)
   10797 		return 0;
   10798 
   10799 #ifdef WM_DEBUG
   10800 	if (sc->sc_type == WM_T_PCH_LPT) {
   10801 		csum_wordaddr = NVM_OFF_COMPAT;
   10802 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10803 	} else {
   10804 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10805 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10806 	}
   10807 
   10808 	/* Dump EEPROM image for debug */
   10809 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10810 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10811 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10812 		/* XXX PCH_SPT? */
   10813 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10814 		if ((eeprom_data & valid_checksum) == 0) {
   10815 			DPRINTF(WM_DEBUG_NVM,
   10816 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10817 				device_xname(sc->sc_dev), eeprom_data,
   10818 				    valid_checksum));
   10819 		}
   10820 	}
   10821 
   10822 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10823 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10824 		for (i = 0; i < NVM_SIZE; i++) {
   10825 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10826 				printf("XXXX ");
   10827 			else
   10828 				printf("%04hx ", eeprom_data);
   10829 			if (i % 8 == 7)
   10830 				printf("\n");
   10831 		}
   10832 	}
   10833 
   10834 #endif /* WM_DEBUG */
   10835 
   10836 	for (i = 0; i < NVM_SIZE; i++) {
   10837 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10838 			return 1;
   10839 		checksum += eeprom_data;
   10840 	}
   10841 
   10842 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10843 #ifdef WM_DEBUG
   10844 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10845 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10846 #endif
   10847 	}
   10848 
   10849 	return 0;
   10850 }
   10851 
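/*
 * Illustrative sketch: the check above is a plain 16-bit wraparound sum
 * of the first NVM_SIZE words, stored checksum word included, that must
 * come out to NVM_CHECKSUM (0xBABA on these parts).  Hedged standalone
 * form:
 */
#if 0
#include <stdint.h>

static int
nvm_image_checksum_ok(const uint16_t *img, unsigned nwords)
{
	uint16_t sum = 0;
	unsigned i;

	for (i = 0; i < nwords; i++)
		sum += img[i];		/* wraps modulo 2^16 */
	return sum == 0xBABA;		/* NVM_CHECKSUM */
}
#endif
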
   10852 static void
   10853 wm_nvm_version_invm(struct wm_softc *sc)
   10854 {
   10855 	uint32_t dword;
   10856 
   10857 	/*
    10858 	 * Linux's code to decode the version is very strange, so we
    10859 	 * don't follow that algorithm and just use word 61 as the
    10860 	 * documentation describes.  Perhaps it's not perfect though...
   10861 	 *
   10862 	 * Example:
   10863 	 *
   10864 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10865 	 */
   10866 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10867 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10868 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10869 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10870 }
   10871 
   10872 static void
   10873 wm_nvm_version(struct wm_softc *sc)
   10874 {
   10875 	uint16_t major, minor, build, patch;
   10876 	uint16_t uid0, uid1;
   10877 	uint16_t nvm_data;
   10878 	uint16_t off;
   10879 	bool check_version = false;
   10880 	bool check_optionrom = false;
   10881 	bool have_build = false;
   10882 
   10883 	/*
   10884 	 * Version format:
   10885 	 *
   10886 	 * XYYZ
   10887 	 * X0YZ
   10888 	 * X0YY
   10889 	 *
   10890 	 * Example:
   10891 	 *
   10892 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10893 	 *	82571	0x50a6	5.10.6?
   10894 	 *	82572	0x506a	5.6.10?
   10895 	 *	82572EI	0x5069	5.6.9?
   10896 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10897 	 *		0x2013	2.1.3?
    10898 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10899 	 */
   10900 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10901 	switch (sc->sc_type) {
   10902 	case WM_T_82571:
   10903 	case WM_T_82572:
   10904 	case WM_T_82574:
   10905 	case WM_T_82583:
   10906 		check_version = true;
   10907 		check_optionrom = true;
   10908 		have_build = true;
   10909 		break;
   10910 	case WM_T_82575:
   10911 	case WM_T_82576:
   10912 	case WM_T_82580:
   10913 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10914 			check_version = true;
   10915 		break;
   10916 	case WM_T_I211:
   10917 		wm_nvm_version_invm(sc);
   10918 		goto printver;
   10919 	case WM_T_I210:
   10920 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10921 			wm_nvm_version_invm(sc);
   10922 			goto printver;
   10923 		}
   10924 		/* FALLTHROUGH */
   10925 	case WM_T_I350:
   10926 	case WM_T_I354:
   10927 		check_version = true;
   10928 		check_optionrom = true;
   10929 		break;
   10930 	default:
   10931 		return;
   10932 	}
   10933 	if (check_version) {
   10934 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10935 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10936 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10937 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10938 			build = nvm_data & NVM_BUILD_MASK;
   10939 			have_build = true;
   10940 		} else
   10941 			minor = nvm_data & 0x00ff;
   10942 
    10943 		/* Read the hex nibbles as decimal digits */
   10944 		minor = (minor / 16) * 10 + (minor % 16);
   10945 		sc->sc_nvm_ver_major = major;
   10946 		sc->sc_nvm_ver_minor = minor;
   10947 
   10948 printver:
   10949 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10950 		    sc->sc_nvm_ver_minor);
   10951 		if (have_build) {
   10952 			sc->sc_nvm_ver_build = build;
   10953 			aprint_verbose(".%d", build);
   10954 		}
   10955 	}
   10956 	if (check_optionrom) {
   10957 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10958 		/* Option ROM Version */
   10959 		if ((off != 0x0000) && (off != 0xffff)) {
   10960 			off += NVM_COMBO_VER_OFF;
   10961 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10962 			wm_nvm_read(sc, off, 1, &uid0);
   10963 			if ((uid0 != 0) && (uid0 != 0xffff)
   10964 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10965 				/* 16bits */
   10966 				major = uid0 >> 8;
   10967 				build = (uid0 << 8) | (uid1 >> 8);
   10968 				patch = uid1 & 0x00ff;
   10969 				aprint_verbose(", option ROM Version %d.%d.%d",
   10970 				    major, build, patch);
   10971 			}
   10972 		}
   10973 	}
   10974 
   10975 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10976 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10977 }
   10978 
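/*
 * Illustrative sketch: a worked example of the XYYZ decode above.  For
 * the 82571 image word 0x50a2: major = 0x5, raw minor = 0x0a, build =
 * 0x2; the (minor / 16) * 10 + (minor % 16) step reads the hex nibbles
 * as decimal digits, so 0x0a stays 10 and the result prints as
 * "5.10.2".  Hedged sketch with the mask/shift values implied by that
 * example:
 */
#if 0
#include <stdint.h>

static void
nvm_decode_xyyz(uint16_t w, unsigned *maj, unsigned *min, unsigned *bld)
{
	unsigned m = (w >> 4) & 0xff;		/* raw minor nibbles */

	*maj = w >> 12;
	*min = (m / 16) * 10 + (m % 16);	/* nibbles read as decimal */
	*bld = w & 0xf;
}
#endif
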
   10979 /*
   10980  * wm_nvm_read:
   10981  *
   10982  *	Read data from the serial EEPROM.
   10983  */
   10984 static int
   10985 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10986 {
   10987 	int rv;
   10988 
   10989 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10990 		return 1;
   10991 
   10992 	if (wm_nvm_acquire(sc))
   10993 		return 1;
   10994 
   10995 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10996 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10997 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   10998 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   10999 	else if (sc->sc_type == WM_T_PCH_SPT)
   11000 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11001 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11002 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11003 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11004 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11005 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11006 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11007 	else
   11008 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11009 
   11010 	wm_nvm_release(sc);
   11011 	return rv;
   11012 }
   11013 
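/*
 * Illustrative sketch: every backend above hangs off this single entry
 * point, so callers never care which bus the NVM sits on.  A hedged
 * usage fragment in the style of the callers elsewhere in this file:
 */
#if 0
	uint16_t macw[3];

	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, macw) != 0)
		aprint_error_dev(sc->sc_dev, "unable to read MAC address\n");
#endif
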
   11014 /*
   11015  * Hardware semaphores.
    11016  * Very complex...
   11017  */
   11018 
   11019 static int
   11020 wm_get_swsm_semaphore(struct wm_softc *sc)
   11021 {
   11022 	int32_t timeout;
   11023 	uint32_t swsm;
   11024 
   11025 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11026 		/* Get the SW semaphore. */
   11027 		timeout = sc->sc_nvm_wordsize + 1;
   11028 		while (timeout) {
   11029 			swsm = CSR_READ(sc, WMREG_SWSM);
   11030 
   11031 			if ((swsm & SWSM_SMBI) == 0)
   11032 				break;
   11033 
   11034 			delay(50);
   11035 			timeout--;
   11036 		}
   11037 
   11038 		if (timeout == 0) {
   11039 			aprint_error_dev(sc->sc_dev,
   11040 			    "could not acquire SWSM SMBI\n");
   11041 			return 1;
   11042 		}
   11043 	}
   11044 
   11045 	/* Get the FW semaphore. */
   11046 	timeout = sc->sc_nvm_wordsize + 1;
   11047 	while (timeout) {
   11048 		swsm = CSR_READ(sc, WMREG_SWSM);
   11049 		swsm |= SWSM_SWESMBI;
   11050 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11051 		/* If we managed to set the bit we got the semaphore. */
   11052 		swsm = CSR_READ(sc, WMREG_SWSM);
   11053 		if (swsm & SWSM_SWESMBI)
   11054 			break;
   11055 
   11056 		delay(50);
   11057 		timeout--;
   11058 	}
   11059 
   11060 	if (timeout == 0) {
   11061 		aprint_error_dev(sc->sc_dev,
   11062 		    "could not acquire SWSM SWESMBI\n");
   11063 		/* Release semaphores */
   11064 		wm_put_swsm_semaphore(sc);
   11065 		return 1;
   11066 	}
   11067 	return 0;
   11068 }
   11069 
   11070 static void
   11071 wm_put_swsm_semaphore(struct wm_softc *sc)
   11072 {
   11073 	uint32_t swsm;
   11074 
   11075 	swsm = CSR_READ(sc, WMREG_SWSM);
   11076 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11077 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11078 }
   11079 
   11080 static int
   11081 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11082 {
   11083 	uint32_t swfw_sync;
   11084 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11085 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    11086 	int timeout;
   11087 
   11088 	for (timeout = 0; timeout < 200; timeout++) {
   11089 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11090 			if (wm_get_swsm_semaphore(sc)) {
   11091 				aprint_error_dev(sc->sc_dev,
   11092 				    "%s: failed to get semaphore\n",
   11093 				    __func__);
   11094 				return 1;
   11095 			}
   11096 		}
   11097 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11098 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11099 			swfw_sync |= swmask;
   11100 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11101 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11102 				wm_put_swsm_semaphore(sc);
   11103 			return 0;
   11104 		}
   11105 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11106 			wm_put_swsm_semaphore(sc);
   11107 		delay(5000);
   11108 	}
   11109 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11110 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11111 	return 1;
   11112 }
   11113 
   11114 static void
   11115 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11116 {
   11117 	uint32_t swfw_sync;
   11118 
   11119 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11120 		while (wm_get_swsm_semaphore(sc) != 0)
   11121 			continue;
   11122 	}
   11123 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11124 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11125 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11126 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11127 		wm_put_swsm_semaphore(sc);
   11128 }
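
/*
 * Illustrative sketch: acquire and release must bracket every access to
 * the shared resource named by the mask, and the same mask must be used
 * on both sides.  A hedged usage fragment with the EEPROM resource used
 * above:
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
		/* ... touch the shared EEPROM interface ... */
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	}
#endif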
   11129 
   11130 static int
   11131 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11132 {
   11133 	uint32_t ext_ctrl;
    11134 	int timeout;
   11135 
   11136 	for (timeout = 0; timeout < 200; timeout++) {
   11137 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11138 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11139 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11140 
   11141 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11142 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11143 			return 0;
   11144 		delay(5000);
   11145 	}
   11146 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11147 	    device_xname(sc->sc_dev), ext_ctrl);
   11148 	return 1;
   11149 }
   11150 
   11151 static void
   11152 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11153 {
   11154 	uint32_t ext_ctrl;
   11155 
   11156 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11157 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11158 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11159 }
   11160 
   11161 static int
   11162 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11163 {
   11164 	int i = 0;
   11165 	uint32_t reg;
   11166 
   11167 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11168 	do {
   11169 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11170 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11171 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11172 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11173 			break;
   11174 		delay(2*1000);
   11175 		i++;
   11176 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11177 
   11178 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11179 		wm_put_hw_semaphore_82573(sc);
   11180 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11181 		    device_xname(sc->sc_dev));
   11182 		return -1;
   11183 	}
   11184 
   11185 	return 0;
   11186 }
   11187 
   11188 static void
   11189 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11190 {
   11191 	uint32_t reg;
   11192 
   11193 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11194 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11195 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11196 }
   11197 
   11198 /*
   11199  * Management mode and power management related subroutines.
   11200  * BMC, AMT, suspend/resume and EEE.
   11201  */
   11202 
   11203 #ifdef WM_WOL
   11204 static int
   11205 wm_check_mng_mode(struct wm_softc *sc)
   11206 {
   11207 	int rv;
   11208 
   11209 	switch (sc->sc_type) {
   11210 	case WM_T_ICH8:
   11211 	case WM_T_ICH9:
   11212 	case WM_T_ICH10:
   11213 	case WM_T_PCH:
   11214 	case WM_T_PCH2:
   11215 	case WM_T_PCH_LPT:
   11216 	case WM_T_PCH_SPT:
   11217 		rv = wm_check_mng_mode_ich8lan(sc);
   11218 		break;
   11219 	case WM_T_82574:
   11220 	case WM_T_82583:
   11221 		rv = wm_check_mng_mode_82574(sc);
   11222 		break;
   11223 	case WM_T_82571:
   11224 	case WM_T_82572:
   11225 	case WM_T_82573:
   11226 	case WM_T_80003:
   11227 		rv = wm_check_mng_mode_generic(sc);
   11228 		break;
   11229 	default:
    11230 		/* nothing to do */
   11231 		rv = 0;
   11232 		break;
   11233 	}
   11234 
   11235 	return rv;
   11236 }
   11237 
   11238 static int
   11239 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11240 {
   11241 	uint32_t fwsm;
   11242 
   11243 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11244 
   11245 	if (((fwsm & FWSM_FW_VALID) != 0)
   11246 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11247 		return 1;
   11248 
   11249 	return 0;
   11250 }
   11251 
   11252 static int
   11253 wm_check_mng_mode_82574(struct wm_softc *sc)
   11254 {
   11255 	uint16_t data;
   11256 
   11257 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11258 
   11259 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11260 		return 1;
   11261 
   11262 	return 0;
   11263 }
   11264 
   11265 static int
   11266 wm_check_mng_mode_generic(struct wm_softc *sc)
   11267 {
   11268 	uint32_t fwsm;
   11269 
   11270 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11271 
   11272 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11273 		return 1;
   11274 
   11275 	return 0;
   11276 }
   11277 #endif /* WM_WOL */
   11278 
   11279 static int
   11280 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11281 {
   11282 	uint32_t manc, fwsm, factps;
   11283 
   11284 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11285 		return 0;
   11286 
   11287 	manc = CSR_READ(sc, WMREG_MANC);
   11288 
   11289 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11290 		device_xname(sc->sc_dev), manc));
   11291 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11292 		return 0;
   11293 
   11294 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11295 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11296 		factps = CSR_READ(sc, WMREG_FACTPS);
   11297 		if (((factps & FACTPS_MNGCG) == 0)
   11298 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11299 			return 1;
    11300 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11301 		uint16_t data;
   11302 
   11303 		factps = CSR_READ(sc, WMREG_FACTPS);
   11304 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11305 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11306 			device_xname(sc->sc_dev), factps, data));
   11307 		if (((factps & FACTPS_MNGCG) == 0)
   11308 		    && ((data & NVM_CFG2_MNGM_MASK)
   11309 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11310 			return 1;
   11311 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11312 	    && ((manc & MANC_ASF_EN) == 0))
   11313 		return 1;
   11314 
   11315 	return 0;
   11316 }
   11317 
   11318 static bool
   11319 wm_phy_resetisblocked(struct wm_softc *sc)
   11320 {
   11321 	bool blocked = false;
   11322 	uint32_t reg;
   11323 	int i = 0;
   11324 
   11325 	switch (sc->sc_type) {
   11326 	case WM_T_ICH8:
   11327 	case WM_T_ICH9:
   11328 	case WM_T_ICH10:
   11329 	case WM_T_PCH:
   11330 	case WM_T_PCH2:
   11331 	case WM_T_PCH_LPT:
   11332 	case WM_T_PCH_SPT:
   11333 		do {
   11334 			reg = CSR_READ(sc, WMREG_FWSM);
   11335 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11336 				blocked = true;
   11337 				delay(10*1000);
   11338 				continue;
   11339 			}
   11340 			blocked = false;
   11341 		} while (blocked && (i++ < 10));
   11342 		return blocked;
   11343 		break;
   11344 	case WM_T_82571:
   11345 	case WM_T_82572:
   11346 	case WM_T_82573:
   11347 	case WM_T_82574:
   11348 	case WM_T_82583:
   11349 	case WM_T_80003:
   11350 		reg = CSR_READ(sc, WMREG_MANC);
   11351 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11352 			return true;
   11353 		else
   11354 			return false;
   11355 		break;
   11356 	default:
   11357 		/* no problem */
   11358 		break;
   11359 	}
   11360 
   11361 	return false;
   11362 }
   11363 
   11364 static void
   11365 wm_get_hw_control(struct wm_softc *sc)
   11366 {
   11367 	uint32_t reg;
   11368 
   11369 	switch (sc->sc_type) {
   11370 	case WM_T_82573:
   11371 		reg = CSR_READ(sc, WMREG_SWSM);
   11372 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11373 		break;
   11374 	case WM_T_82571:
   11375 	case WM_T_82572:
   11376 	case WM_T_82574:
   11377 	case WM_T_82583:
   11378 	case WM_T_80003:
   11379 	case WM_T_ICH8:
   11380 	case WM_T_ICH9:
   11381 	case WM_T_ICH10:
   11382 	case WM_T_PCH:
   11383 	case WM_T_PCH2:
   11384 	case WM_T_PCH_LPT:
   11385 	case WM_T_PCH_SPT:
   11386 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11387 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11388 		break;
   11389 	default:
   11390 		break;
   11391 	}
   11392 }
   11393 
   11394 static void
   11395 wm_release_hw_control(struct wm_softc *sc)
   11396 {
   11397 	uint32_t reg;
   11398 
   11399 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11400 		return;
   11401 
   11402 	if (sc->sc_type == WM_T_82573) {
   11403 		reg = CSR_READ(sc, WMREG_SWSM);
   11404 		reg &= ~SWSM_DRV_LOAD;
   11405 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   11406 	} else {
   11407 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11408 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11409 	}
   11410 }
   11411 
   11412 static void
   11413 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11414 {
   11415 	uint32_t reg;
   11416 
   11417 	if (sc->sc_type < WM_T_PCH2)
   11418 		return;
   11419 
   11420 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11421 
   11422 	if (gate)
   11423 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11424 	else
   11425 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11426 
   11427 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11428 }
   11429 
   11430 static void
   11431 wm_smbustopci(struct wm_softc *sc)
   11432 {
   11433 	uint32_t fwsm, reg;
   11434 
   11435 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11436 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11437 
   11438 	/* Acquire semaphore */
   11439 	wm_get_swfwhw_semaphore(sc);
   11440 
   11441 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11442 	if (((fwsm & FWSM_FW_VALID) == 0)
    11443 	    && (wm_phy_resetisblocked(sc) == false)) {
   11444 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11445 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11446 			reg |= CTRL_EXT_FORCE_SMBUS;
   11447 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11448 			CSR_WRITE_FLUSH(sc);
   11449 			delay(50*1000);
   11450 		}
   11451 
   11452 		/* Toggle LANPHYPC */
   11453 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11454 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11455 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11456 		CSR_WRITE_FLUSH(sc);
   11457 		delay(10);
   11458 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11459 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11460 		CSR_WRITE_FLUSH(sc);
   11461 		delay(50*1000);
   11462 
   11463 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11464 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11465 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11466 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11467 		}
   11468 	}
   11469 
   11470 	/* Release semaphore */
   11471 	wm_put_swfwhw_semaphore(sc);
   11472 
   11473 	/*
   11474 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11475 	 */
   11476 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11477 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11478 }

static void
wm_init_manageability(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		/* Disable hardware interception of ARP */
		manc &= ~MANC_ARP_EN;

		/* Enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}
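
/*
 * MANC2H chooses which intercepted management packets are *also* passed
 * up to the host; ports 623 and 624 are the ASF/RMCP ports.  Extending
 * the filter follows the same read-modify-write shape (the bit name in
 * this sketch is hypothetical - see if_wmreg.h for the ones that exist):
 *
 *	manc2h |= MANC2H_PORT_664;	// hypothetical port filter bit
 *	CSR_WRITE(sc, WMREG_MANC2H, manc2h);
 */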

static void
wm_release_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		manc |= MANC_ARP_EN;
		if (sc->sc_type >= WM_T_82571)
			manc &= ~MANC_EN_MNG2HOST;

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flag is set only after the EEPROM state has
	 * been reset.
	 */
}
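
/*
 * Summary of the flags wm_get_wakeup() records:
 *
 *	WM_F_HAS_AMT		chipset carries Intel AMT firmware
 *	WM_F_ARC_SUBSYS_VALID	FWSM says the management core is active
 *	WM_F_ASF_FIRMWARE_PRES	ASF firmware may be present
 *	WM_F_HAS_MANAGE		manageability pass-through is enabled
 *
 * These gate the wm_init_manageability()/wm_release_manageability() and
 * wm_get_hw_control()/wm_release_hw_control() paths above.
 */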

#ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
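
/*
 * The loop above is a verify-and-retry pattern: the voltage-regulator
 * shutdown request can be dropped if the PHY is busy, so the write is
 * read back and compared.  On a mismatch the PHY is reset and the whole
 * sequence repeated; the (i != 0) test means "accept whatever we got on
 * the second pass" rather than looping forever.
 */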

static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg;
	pcireg_t pmode;
	int pmreg;

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if ((child != NULL) && (child->mii_mpd_rev <= 2))
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		|| (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
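
/*
 * Wakeup in a nutshell: WUFC selects which packets may wake the machine
 * (WUFC_MAG is the classic magic packet), WUC_PME_EN arms PME in the MAC,
 * and PCI_PMCSR_PME_EN arms it at the PCI power-management level.  All
 * three layers must agree; clearing any one of them (see the "#if 0"
 * branch above) effectively disables WOL.
 */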
#endif /* WM_WOL */

/* LPLU */

static void
wm_lplu_d0_disable(struct wm_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
}

static void
wm_lplu_d0_disable_pch(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
	reg |= HV_OEM_BITS_ANEGNOW;
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
}
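
/*
 * LPLU (Low Power Link Up) lets the PHY negotiate the lowest advertised
 * speed to save power, at the cost of renegotiation time.  It is turned
 * off for D0 here so a running system links at full speed.  On PCH parts
 * the knob lives in the PHY's OEM bits rather than WMREG_PHY_CTRL, and
 * HV_OEM_BITS_ANEGNOW restarts autonegotiation so the change takes
 * effect immediately.
 */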

/* EEE */

static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		ipcnfg &= ~IPCNFG_10BASE_TE;
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}
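
/*
 * EEE (Energy Efficient Ethernet, 802.3az) on I350: IPCNFG advertises
 * EEE per speed during autonegotiation, EEER actually enables entering
 * LPI (low power idle) on the Tx/Rx paths, and EEER_LPI_FC lets flow
 * control frames be honoured around LPI transitions.  The trailing reads
 * are posted-write flushes - the same idiom CSR_WRITE_FLUSH() wraps
 * elsewhere in this file.
 */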

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds belong in the PHY drivers.
 */

/* Workaround for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
#if 0
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link is other than 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* Read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

out:
	return;
#endif
}

/* WOL from S5 stops working */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}
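
/*
 * The pulse above - setting and then immediately clearing the Kumeran
 * near-end loopback bit - is intentional: the write sequence itself is
 * what clears the gig downshift state in the IGP3 PHY.  No bit needs to
 * stay set for the workaround to take effect.
 */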

/*
 * Workaround for pch's PHYs
 * XXX should be moved to new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is a link, so that K1 is disabled if the link is running at
	 * 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}
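
/*
 * K1 is a Kumeran power-saving state on the MAC-PHY link of PCH parts.
 * At gigabit speed it can stall the link, hence the policy above:
 * honour the NVM's K1 setting while the link is down, but force K1 off
 * whenever a 1Gbps link is up.  The IGP3_KMRN_DIAG values 0x0100/0x4100
 * are the "link stall fix" magic numbers carried over from Intel's
 * reference driver.
 */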

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}
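
/*
 * The save/force/restore dance at the end of wm_configure_k1_ich8lan()
 * briefly forces the MAC to 10Mbps with speed-bypass set so the K1
 * change is latched on the Kumeran interface, then restores CTRL and
 * CTRL_EXT exactly as they were.  Each CSR_WRITE_FLUSH() pushes the
 * posted writes out before the delay starts counting.
 */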

/* Special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	int pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}
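
/*
 * Shape of the errata workaround above: an I210/I211 can come out of
 * reset with its PHY PLL unconfigured (the GS40G_PHY_PLL_UNCONF bits
 * read back all-set).  Each retry rewrites the iNVM autoload word with
 * the PLL workaround value, bounces the function through D3hot and back
 * to D0 so the autoload reruns, then restores the original word.  WUC
 * and MDICNFG are saved up front and restored because the procedure
 * clobbers both.
 */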