      1 /*	$NetBSD: if_wm.c,v 1.357 2015/10/13 08:11:31 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- EEE (Energy Efficient Ethernet)
     77  *	- Multi queue
     78  *	- Image Unique ID
     79  *	- LPLU other than PCH*
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.357 2015/10/13 08:11:31 knakahara Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 
    107 #include <sys/rndsource.h>
    108 
    109 #include <net/if.h>
    110 #include <net/if_dl.h>
    111 #include <net/if_media.h>
    112 #include <net/if_ether.h>
    113 
    114 #include <net/bpf.h>
    115 
    116 #include <netinet/in.h>			/* XXX for struct ip */
    117 #include <netinet/in_systm.h>		/* XXX for struct ip */
    118 #include <netinet/ip.h>			/* XXX for struct ip */
    119 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    120 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    121 
    122 #include <sys/bus.h>
    123 #include <sys/intr.h>
    124 #include <machine/endian.h>
    125 
    126 #include <dev/mii/mii.h>
    127 #include <dev/mii/miivar.h>
    128 #include <dev/mii/miidevs.h>
    129 #include <dev/mii/mii_bitbang.h>
    130 #include <dev/mii/ikphyreg.h>
    131 #include <dev/mii/igphyreg.h>
    132 #include <dev/mii/igphyvar.h>
    133 #include <dev/mii/inbmphyreg.h>
    134 
    135 #include <dev/pci/pcireg.h>
    136 #include <dev/pci/pcivar.h>
    137 #include <dev/pci/pcidevs.h>
    138 
    139 #include <dev/pci/if_wmreg.h>
    140 #include <dev/pci/if_wmvar.h>
    141 
    142 #ifdef WM_DEBUG
    143 #define	WM_DEBUG_LINK		0x01
    144 #define	WM_DEBUG_TX		0x02
    145 #define	WM_DEBUG_RX		0x04
    146 #define	WM_DEBUG_GMII		0x08
    147 #define	WM_DEBUG_MANAGE		0x10
    148 #define	WM_DEBUG_NVM		0x20
    149 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    150     | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
    151 
    152 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (/*CONSTCOND*/0)
    153 #else
    154 #define	DPRINTF(x, y)	/* nothing */
    155 #endif /* WM_DEBUG */
    156 
    157 #ifdef NET_MPSAFE
    158 #define WM_MPSAFE	1
    159 #endif
    160 
    161 #ifdef __HAVE_PCI_MSI_MSIX
    162 #define WM_MSI_MSIX	1 /* Enable by default */
    163 #endif
    164 
    165 /*
    166  * This device driver splits its interrupts into TX, RX and link
    167  * state.  The MSI-X vector indexes are listed below.
    168  */
    169 #define WM_MSIX_NINTR		3
    170 #define WM_MSIX_TXINTR_IDX	0
    171 #define WM_MSIX_RXINTR_IDX	1
    172 #define WM_MSIX_LINKINTR_IDX	2
    173 #define WM_MAX_NINTR		WM_MSIX_NINTR
    174 
    175 /*
    176  * This device driver sets the affinity of each interrupt as below
    177  * (round-robin); if there are fewer CPUs than interrupts, the same
    178  * CPU serves multiple interrupts, as sketched after the definitions.
    179  */
    180 #define WM_MSIX_TXINTR_CPUID	0
    181 #define WM_MSIX_RXINTR_CPUID	1
    182 #define WM_MSIX_LINKINTR_CPUID	2
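
#if 0
/*
 * Illustrative sketch only, not part of the driver: with the fixed
 * preferences above, wrapping the preferred index by the kernel's CPU
 * count ("ncpu", assumed here) reuses CPUs when there are fewer CPUs
 * than interrupts.  The real binding happens at interrupt-establish
 * time in wm_attach().
 */
static int
wm_msix_cpuid(int preferred)
{

	return preferred % ncpu;
}
#endif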
    183 
    184 /*
    185  * Transmit descriptor list size.  Due to errata, we can only have
    186  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    187  * on >= 82544.  We tell the upper layers that they can queue a lot
    188  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    189  * of them at a time.
    190  *
    191  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    192  * chains containing many small mbufs have been observed in zero-copy
    193  * situations with jumbo frames.
    194  */
    195 #define	WM_NTXSEGS		256
    196 #define	WM_IFQUEUELEN		256
    197 #define	WM_TXQUEUELEN_MAX	64
    198 #define	WM_TXQUEUELEN_MAX_82547	16
    199 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    200 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    201 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    202 #define	WM_NTXDESC_82542	256
    203 #define	WM_NTXDESC_82544	4096
    204 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    205 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    206 #define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
    207 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    208 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
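
#if 0
/*
 * Minimal sketch, not part of the driver: WM_NEXTTX() and friends
 * rely on the ring and job counts being powers of two, so that
 * "(x + 1) & (n - 1)" wraps exactly like "(x + 1) % n" but without
 * a division, e.g. n = 256 takes 255 back to 0.
 */
static inline int
wm_ring_next(int x, int n)
{

	return (x + 1) & (n - 1);
}
#endif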
    209 
    210 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    211 
    212 /*
    213  * Receive descriptor list size.  We have one Rx buffer for normal
    214  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    215  * packet.  We allocate 256 receive descriptors, each with a 2k
    216  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    217  */
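/*
 * (Worked example: a 9014-byte jumbo frame spans
 * howmany(9014, MCLBYTES) = 5 of the 2k clusters, and 256 / 5 is
 * roughly 51, hence the "50 jumbo packets" figure above.)
 */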
    218 #define	WM_NRXDESC		256
    219 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    220 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    221 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    222 
    223 typedef union txdescs {
    224 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    225 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    226 } txdescs_t;
    227 
    228 #define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
    229 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    230 
    231 /*
    232  * Software state for transmit jobs.
    233  */
    234 struct wm_txsoft {
    235 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    236 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    237 	int txs_firstdesc;		/* first descriptor in packet */
    238 	int txs_lastdesc;		/* last descriptor in packet */
    239 	int txs_ndesc;			/* # of descriptors used */
    240 };
    241 
    242 /*
    243  * Software state for receive buffers.  Each descriptor gets a
    244  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    245  * more than one buffer, we chain them together.
    246  */
    247 struct wm_rxsoft {
    248 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    249 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    250 };
    251 
    252 #define WM_LINKUP_TIMEOUT	50
    253 
    254 static uint16_t swfwphysem[] = {
    255 	SWFW_PHY0_SM,
    256 	SWFW_PHY1_SM,
    257 	SWFW_PHY2_SM,
    258 	SWFW_PHY3_SM
    259 };
    260 
    261 static const uint32_t wm_82580_rxpbs_table[] = {
    262 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    263 };
    264 
    265 struct wm_softc;
    266 
    267 struct wm_txqueue {
    268 	kmutex_t *txq_lock;		/* lock for tx operations */
    269 
    270 	struct wm_softc *txq_sc;
    271 
    272 	/* Software state for the transmit descriptors. */
    273 	int txq_num;			/* must be a power of two */
    274 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    275 
    276 	/* TX control data structures. */
    277 	int txq_ndesc;			/* must be a power of two */
    278 	txdescs_t *txq_descs_u;
    279 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    280 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    281 	int txq_desc_rseg;		/* real number of control segments */
    282 	size_t txq_desc_size;		/* control data size */
    283 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    284 #define	txq_descs	txq_descs_u->sctxu_txdescs
    285 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    286 
    287 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    288 
    289 	int txq_free;			/* number of free Tx descriptors */
    290 	int txq_next;			/* next ready Tx descriptor */
    291 
    292 	int txq_sfree;			/* number of free Tx jobs */
    293 	int txq_snext;			/* next free Tx job */
    294 	int txq_sdirty;			/* dirty Tx jobs */
    295 
    296 	/* These 4 variables are used only on the 82547. */
    297 	int txq_fifo_size;		/* Tx FIFO size */
    298 	int txq_fifo_head;		/* current head of FIFO */
    299 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    300 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    301 
    302 	/* XXX which event counter is required? */
    303 };
    304 
    305 struct wm_rxqueue {
    306 	kmutex_t *rxq_lock;		/* lock for rx operations */
    307 
    308 	struct wm_softc *rxq_sc;
    309 
    310 	/* Software state for the receive descriptors. */
    311 	wiseman_rxdesc_t *rxq_descs;
    312 
    313 	/* RX control data structures. */
    314 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    315 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    316 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    317 	int rxq_desc_rseg;		/* real number of control segments */
    318 	size_t rxq_desc_size;		/* control data size */
    319 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    320 
    321 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    322 
    323 	int rxq_ptr;			/* next ready Rx descriptor/queue ent */
    324 	int rxq_discard;
    325 	int rxq_len;
    326 	struct mbuf *rxq_head;
    327 	struct mbuf *rxq_tail;
    328 	struct mbuf **rxq_tailp;
    329 
    330 	/* XXX which event counter is required? */
    331 };
    332 
    333 /*
    334  * Software state per device.
    335  */
    336 struct wm_softc {
    337 	device_t sc_dev;		/* generic device information */
    338 	bus_space_tag_t sc_st;		/* bus space tag */
    339 	bus_space_handle_t sc_sh;	/* bus space handle */
    340 	bus_size_t sc_ss;		/* bus space size */
    341 	bus_space_tag_t sc_iot;		/* I/O space tag */
    342 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    343 	bus_size_t sc_ios;		/* I/O space size */
    344 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    345 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    346 	bus_size_t sc_flashs;		/* flash registers space size */
    347 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    348 
    349 	struct ethercom sc_ethercom;	/* ethernet common data */
    350 	struct mii_data sc_mii;		/* MII/media information */
    351 
    352 	pci_chipset_tag_t sc_pc;
    353 	pcitag_t sc_pcitag;
    354 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    355 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    356 
    357 	uint16_t sc_pcidevid;		/* PCI device ID */
    358 	wm_chip_type sc_type;		/* MAC type */
    359 	int sc_rev;			/* MAC revision */
    360 	wm_phy_type sc_phytype;		/* PHY type */
    361 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    362 #define	WM_MEDIATYPE_UNKNOWN		0x00
    363 #define	WM_MEDIATYPE_FIBER		0x01
    364 #define	WM_MEDIATYPE_COPPER		0x02
    365 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    366 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    367 	int sc_flags;			/* flags; see below */
    368 	int sc_if_flags;		/* last if_flags */
    369 	int sc_flowflags;		/* 802.3x flow control flags */
    370 	int sc_align_tweak;
    371 
    372 	void *sc_ihs[WM_MAX_NINTR];	/*
    373 					 * interrupt cookie.
    374 					 * legacy and msi use sc_ihs[0].
    375 					 */
    376 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    377 	int sc_nintrs;			/* number of interrupts */
    378 
    379 	callout_t sc_tick_ch;		/* tick callout */
    380 	bool sc_stopping;
    381 
    382 	int sc_nvm_ver_major;
    383 	int sc_nvm_ver_minor;
    384 	int sc_nvm_ver_build;
    385 	int sc_nvm_addrbits;		/* NVM address bits */
    386 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    387 	int sc_ich8_flash_base;
    388 	int sc_ich8_flash_bank_size;
    389 	int sc_nvm_k1_enabled;
    390 
    391 	int sc_ntxqueues;
    392 	struct wm_txqueue *sc_txq;
    393 
    394 	int sc_nrxqueues;
    395 	struct wm_rxqueue *sc_rxq;
    396 
    397 #ifdef WM_EVENT_COUNTERS
    398 	/* Event counters. */
    399 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
    400 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
    401 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
    402 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
    403 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
    404 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
    405 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    406 
    407 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
    408 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
    409 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
    410 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
    411 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
    412 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
    413 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
    414 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
    415 
    416 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    417 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
    418 
    419 	struct evcnt sc_ev_tu;		/* Tx underrun */
    420 
    421 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    422 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    423 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    424 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    425 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    426 #endif /* WM_EVENT_COUNTERS */
    427 
    428 	/* This variable is used only on the 82547. */
    429 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    430 
    431 	uint32_t sc_ctrl;		/* prototype CTRL register */
    432 #if 0
    433 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    434 #endif
    435 	uint32_t sc_icr;		/* prototype interrupt bits */
    436 	uint32_t sc_itr;		/* prototype intr throttling reg */
    437 	uint32_t sc_tctl;		/* prototype TCTL register */
    438 	uint32_t sc_rctl;		/* prototype RCTL register */
    439 	uint32_t sc_txcw;		/* prototype TXCW register */
    440 	uint32_t sc_tipg;		/* prototype TIPG register */
    441 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    442 	uint32_t sc_pba;		/* prototype PBA register */
    443 
    444 	int sc_tbi_linkup;		/* TBI link status */
    445 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    446 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    447 
    448 	int sc_mchash_type;		/* multicast filter offset */
    449 
    450 	krndsource_t rnd_source;	/* random source */
    451 
    452 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    453 };
    454 
    455 #define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
    456 #define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
    457 #define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
    458 #define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
    459 #define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
    460 #define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
    461 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    462 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    463 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
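
#if 0
/*
 * Sketch of the intended locking pattern, not part of the driver.
 * The macros above are no-ops at run time when the lock pointer is
 * NULL (the non-WM_MPSAFE configuration).
 */
static void
wm_example_locked_op(struct wm_txqueue *txq)
{

	WM_TX_LOCK(txq);
	KASSERT(WM_TX_LOCKED(txq));
	/* ... modify txq state ... */
	WM_TX_UNLOCK(txq);
}
#endif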
    464 
    465 #ifdef WM_MPSAFE
    466 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    467 #else
    468 #define CALLOUT_FLAGS	0
    469 #endif
    470 
    471 #define	WM_RXCHAIN_RESET(rxq)						\
    472 do {									\
    473 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    474 	*(rxq)->rxq_tailp = NULL;					\
    475 	(rxq)->rxq_len = 0;						\
    476 } while (/*CONSTCOND*/0)
    477 
    478 #define	WM_RXCHAIN_LINK(rxq, m)						\
    479 do {									\
    480 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    481 	(rxq)->rxq_tailp = &(m)->m_next;				\
    482 } while (/*CONSTCOND*/0)
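
#if 0
/*
 * Sketch, not part of the driver, of how the two macros above keep
 * an O(1)-append mbuf chain: rxq_tailp always points at the location
 * (rxq_head or some m_next field) through which the next fragment
 * will be stored.
 */
static void
wm_example_rxchain(struct wm_rxqueue *rxq, struct mbuf *m1, struct mbuf *m2)
{

	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head = m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2 */
}
#endif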
    483 
    484 #ifdef WM_EVENT_COUNTERS
    485 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    486 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    487 #else
    488 #define	WM_EVCNT_INCR(ev)	/* nothing */
    489 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    490 #endif
    491 
    492 #define	CSR_READ(sc, reg)						\
    493 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    494 #define	CSR_WRITE(sc, reg, val)						\
    495 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    496 #define	CSR_WRITE_FLUSH(sc)						\
    497 	(void) CSR_READ((sc), WMREG_STATUS)
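
#if 0
/*
 * Typical CSR_WRITE_FLUSH() use, sketched here rather than taken from
 * the driver: reading STATUS back forces posted bus writes out to the
 * device before a dependent delay or access.
 */
static void
wm_example_flush(struct wm_softc *sc)
{

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);	/* push the posted write to the chip */
	delay(5000);		/* now the delay really times the reset */
}
#endif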
    498 
    499 #define ICH8_FLASH_READ32(sc, reg) \
    500 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    501 #define ICH8_FLASH_WRITE32(sc, reg, data) \
    502 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    503 
    504 #define ICH8_FLASH_READ16(sc, reg) \
    505 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    506 #define ICH8_FLASH_WRITE16(sc, reg, data) \
    507 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    508 
    509 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
    510 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    511 
    512 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    513 #define	WM_CDTXADDR_HI(txq, x)						\
    514 	(sizeof(bus_addr_t) == 8 ?					\
    515 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    516 
    517 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    518 #define	WM_CDRXADDR_HI(rxq, x)						\
    519 	(sizeof(bus_addr_t) == 8 ?					\
    520 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    521 
    522 /*
    523  * Register read/write functions.
    524  * Other than CSR_{READ|WRITE}().
    525  */
    526 #if 0
    527 static inline uint32_t wm_io_read(struct wm_softc *, int);
    528 #endif
    529 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    530 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    531 	uint32_t, uint32_t);
    532 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    533 
    534 /*
    535  * Descriptor sync/init functions.
    536  */
    537 static inline void wm_cdtxsync(struct wm_softc *, int, int, int);
    538 static inline void wm_cdrxsync(struct wm_softc *, int, int);
    539 static inline void wm_init_rxdesc(struct wm_softc *, int);
    540 
    541 /*
    542  * Device driver interface functions and commonly used functions.
    543  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    544  */
    545 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    546 static int	wm_match(device_t, cfdata_t, void *);
    547 static void	wm_attach(device_t, device_t, void *);
    548 static int	wm_detach(device_t, int);
    549 static bool	wm_suspend(device_t, const pmf_qual_t *);
    550 static bool	wm_resume(device_t, const pmf_qual_t *);
    551 static void	wm_watchdog(struct ifnet *);
    552 static void	wm_tick(void *);
    553 static int	wm_ifflags_cb(struct ethercom *);
    554 static int	wm_ioctl(struct ifnet *, u_long, void *);
    555 /* MAC address related */
    556 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    557 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    558 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    559 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    560 static void	wm_set_filter(struct wm_softc *);
    561 /* Reset and init related */
    562 static void	wm_set_vlan(struct wm_softc *);
    563 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    564 static void	wm_get_auto_rd_done(struct wm_softc *);
    565 static void	wm_lan_init_done(struct wm_softc *);
    566 static void	wm_get_cfg_done(struct wm_softc *);
    567 static void	wm_initialize_hardware_bits(struct wm_softc *);
    568 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    569 static void	wm_reset(struct wm_softc *);
    570 static int	wm_add_rxbuf(struct wm_softc *, int);
    571 static void	wm_rxdrain(struct wm_softc *);
    572 static int	wm_init(struct ifnet *);
    573 static int	wm_init_locked(struct ifnet *);
    574 static void	wm_stop(struct ifnet *, int);
    575 static void	wm_stop_locked(struct ifnet *, int);
    576 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    577     uint32_t *, uint8_t *);
    578 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    579 static void	wm_82547_txfifo_stall(void *);
    580 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    581 /* DMA related */
    582 static int	wm_alloc_tx_descs(struct wm_softc *);
    583 static void	wm_free_tx_descs(struct wm_softc *);
    584 static void	wm_init_tx_descs(struct wm_softc *);
    585 static int	wm_alloc_rx_descs(struct wm_softc *);
    586 static void	wm_free_rx_descs(struct wm_softc *);
    587 static void	wm_init_rx_descs(struct wm_softc *);
    588 static int	wm_alloc_tx_buffer(struct wm_softc *);
    589 static void	wm_free_tx_buffer(struct wm_softc *);
    590 static void	wm_init_tx_buffer(struct wm_softc *);
    591 static int	wm_alloc_rx_buffer(struct wm_softc *);
    592 static void	wm_free_rx_buffer(struct wm_softc *);
    593 static int	wm_init_rx_buffer(struct wm_softc *);
    594 static void	wm_init_tx_queue(struct wm_softc *);
    595 static int	wm_init_rx_queue(struct wm_softc *);
    596 static int	wm_alloc_txrx_queues(struct wm_softc *);
    597 static void	wm_free_txrx_queues(struct wm_softc *);
    598 static int	wm_init_txrx_queues(struct wm_softc *);
    599 /* Start */
    600 static void	wm_start(struct ifnet *);
    601 static void	wm_start_locked(struct ifnet *);
    602 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    603     uint32_t *, uint32_t *, bool *);
    604 static void	wm_nq_start(struct ifnet *);
    605 static void	wm_nq_start_locked(struct ifnet *);
    606 /* Interrupt */
    607 static int	wm_txeof(struct wm_softc *);
    608 static void	wm_rxeof(struct wm_softc *);
    609 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    610 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    611 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    612 static void	wm_linkintr(struct wm_softc *, uint32_t);
    613 static int	wm_intr_legacy(void *);
    614 #ifdef WM_MSI_MSIX
    615 static int	wm_txintr_msix(void *);
    616 static int	wm_rxintr_msix(void *);
    617 static int	wm_linkintr_msix(void *);
    618 #endif
    619 
    620 /*
    621  * Media related.
    622  * GMII, SGMII, TBI, SERDES and SFP.
    623  */
    624 /* Common */
    625 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    626 /* GMII related */
    627 static void	wm_gmii_reset(struct wm_softc *);
    628 static int	wm_get_phy_id_82575(struct wm_softc *);
    629 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    630 static int	wm_gmii_mediachange(struct ifnet *);
    631 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    632 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    633 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    634 static int	wm_gmii_i82543_readreg(device_t, int, int);
    635 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    636 static int	wm_gmii_i82544_readreg(device_t, int, int);
    637 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    638 static int	wm_gmii_i80003_readreg(device_t, int, int);
    639 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    640 static int	wm_gmii_bm_readreg(device_t, int, int);
    641 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    642 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    643 static int	wm_gmii_hv_readreg(device_t, int, int);
    644 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    645 static int	wm_gmii_82580_readreg(device_t, int, int);
    646 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    647 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    648 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    649 static void	wm_gmii_statchg(struct ifnet *);
    650 static int	wm_kmrn_readreg(struct wm_softc *, int);
    651 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    652 /* SGMII */
    653 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    654 static int	wm_sgmii_readreg(device_t, int, int);
    655 static void	wm_sgmii_writereg(device_t, int, int, int);
    656 /* TBI related */
    657 static void	wm_tbi_mediainit(struct wm_softc *);
    658 static int	wm_tbi_mediachange(struct ifnet *);
    659 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    660 static int	wm_check_for_link(struct wm_softc *);
    661 static void	wm_tbi_tick(struct wm_softc *);
    662 /* SERDES related */
    663 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    664 static int	wm_serdes_mediachange(struct ifnet *);
    665 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    666 static void	wm_serdes_tick(struct wm_softc *);
    667 /* SFP related */
    668 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    669 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    670 
    671 /*
    672  * NVM related.
    673  * Microwire, SPI (w/wo EERD) and Flash.
    674  */
    675 /* Misc functions */
    676 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    677 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    678 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    679 /* Microwire */
    680 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    681 /* SPI */
    682 static int	wm_nvm_ready_spi(struct wm_softc *);
    683 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    684 /* Used with EERD */
    685 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    686 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    687 /* Flash */
    688 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    689     unsigned int *);
    690 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    691 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    692 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    693 	uint16_t *);
    694 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    695 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    696 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    697 /* iNVM */
    698 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    699 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    700 /* Lock, detect NVM type, validate checksum and read */
    701 static int	wm_nvm_acquire(struct wm_softc *);
    702 static void	wm_nvm_release(struct wm_softc *);
    703 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    704 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    705 static int	wm_nvm_validate_checksum(struct wm_softc *);
    706 static void	wm_nvm_version_invm(struct wm_softc *);
    707 static void	wm_nvm_version(struct wm_softc *);
    708 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    709 
    710 /*
    711  * Hardware semaphores.
    712  * Very complex...
    713  */
    714 static int	wm_get_swsm_semaphore(struct wm_softc *);
    715 static void	wm_put_swsm_semaphore(struct wm_softc *);
    716 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    717 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    718 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    719 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    720 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    721 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    722 
    723 /*
    724  * Management mode and power management related subroutines.
    725  * BMC, AMT, suspend/resume and EEE.
    726  */
    727 static int	wm_check_mng_mode(struct wm_softc *);
    728 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    729 static int	wm_check_mng_mode_82574(struct wm_softc *);
    730 static int	wm_check_mng_mode_generic(struct wm_softc *);
    731 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    732 static int	wm_check_reset_block(struct wm_softc *);
    733 static void	wm_get_hw_control(struct wm_softc *);
    734 static void	wm_release_hw_control(struct wm_softc *);
    735 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
    736 static void	wm_smbustopci(struct wm_softc *);
    737 static void	wm_init_manageability(struct wm_softc *);
    738 static void	wm_release_manageability(struct wm_softc *);
    739 static void	wm_get_wakeup(struct wm_softc *);
    740 #ifdef WM_WOL
    741 static void	wm_enable_phy_wakeup(struct wm_softc *);
    742 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    743 static void	wm_enable_wakeup(struct wm_softc *);
    744 #endif
    745 /* EEE */
    746 static void	wm_set_eee_i350(struct wm_softc *);
    747 
    748 /*
    749  * Workarounds (mainly PHY related).
    750  * Basically, PHY workarounds are in the PHY drivers.
    751  */
    752 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    753 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    754 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    755 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    756 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    757 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    758 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    759 static void	wm_reset_init_script_82575(struct wm_softc *);
    760 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    761 static void	wm_pll_workaround_i210(struct wm_softc *);
    762 
    763 #ifdef WM_MSI_MSIX
    764 struct _msix_matrix {
    765 	const char *intrname;
    766 	int(*func)(void *);
    767 	int intridx;
    768 	int cpuid;
    769 } msix_matrix[WM_MSIX_NINTR] = {
    770 	{ "TX", wm_txintr_msix, WM_MSIX_TXINTR_IDX, WM_MSIX_TXINTR_CPUID },
    771 	{ "RX", wm_rxintr_msix, WM_MSIX_RXINTR_IDX, WM_MSIX_RXINTR_CPUID },
    772 	{ "LINK", wm_linkintr_msix, WM_MSIX_LINKINTR_IDX,
    773 	  WM_MSIX_LINKINTR_CPUID },
    774 };
    775 #endif
    776 
    777 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    778     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    779 
    780 /*
    781  * Devices supported by this driver.
    782  */
    783 static const struct wm_product {
    784 	pci_vendor_id_t		wmp_vendor;
    785 	pci_product_id_t	wmp_product;
    786 	const char		*wmp_name;
    787 	wm_chip_type		wmp_type;
    788 	uint32_t		wmp_flags;
    789 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    790 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    791 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    792 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    793 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    794 } wm_products[] = {
    795 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    796 	  "Intel i82542 1000BASE-X Ethernet",
    797 	  WM_T_82542_2_1,	WMP_F_FIBER },
    798 
    799 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    800 	  "Intel i82543GC 1000BASE-X Ethernet",
    801 	  WM_T_82543,		WMP_F_FIBER },
    802 
    803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    804 	  "Intel i82543GC 1000BASE-T Ethernet",
    805 	  WM_T_82543,		WMP_F_COPPER },
    806 
    807 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    808 	  "Intel i82544EI 1000BASE-T Ethernet",
    809 	  WM_T_82544,		WMP_F_COPPER },
    810 
    811 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    812 	  "Intel i82544EI 1000BASE-X Ethernet",
    813 	  WM_T_82544,		WMP_F_FIBER },
    814 
    815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    816 	  "Intel i82544GC 1000BASE-T Ethernet",
    817 	  WM_T_82544,		WMP_F_COPPER },
    818 
    819 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    820 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    821 	  WM_T_82544,		WMP_F_COPPER },
    822 
    823 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    824 	  "Intel i82540EM 1000BASE-T Ethernet",
    825 	  WM_T_82540,		WMP_F_COPPER },
    826 
    827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    828 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    829 	  WM_T_82540,		WMP_F_COPPER },
    830 
    831 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    832 	  "Intel i82540EP 1000BASE-T Ethernet",
    833 	  WM_T_82540,		WMP_F_COPPER },
    834 
    835 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    836 	  "Intel i82540EP 1000BASE-T Ethernet",
    837 	  WM_T_82540,		WMP_F_COPPER },
    838 
    839 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    840 	  "Intel i82540EP 1000BASE-T Ethernet",
    841 	  WM_T_82540,		WMP_F_COPPER },
    842 
    843 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    844 	  "Intel i82545EM 1000BASE-T Ethernet",
    845 	  WM_T_82545,		WMP_F_COPPER },
    846 
    847 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    848 	  "Intel i82545GM 1000BASE-T Ethernet",
    849 	  WM_T_82545_3,		WMP_F_COPPER },
    850 
    851 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    852 	  "Intel i82545GM 1000BASE-X Ethernet",
    853 	  WM_T_82545_3,		WMP_F_FIBER },
    854 
    855 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    856 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    857 	  WM_T_82545_3,		WMP_F_SERDES },
    858 
    859 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    860 	  "Intel i82546EB 1000BASE-T Ethernet",
    861 	  WM_T_82546,		WMP_F_COPPER },
    862 
    863 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    864 	  "Intel i82546EB 1000BASE-T Ethernet",
    865 	  WM_T_82546,		WMP_F_COPPER },
    866 
    867 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    868 	  "Intel i82545EM 1000BASE-X Ethernet",
    869 	  WM_T_82545,		WMP_F_FIBER },
    870 
    871 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    872 	  "Intel i82546EB 1000BASE-X Ethernet",
    873 	  WM_T_82546,		WMP_F_FIBER },
    874 
    875 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    876 	  "Intel i82546GB 1000BASE-T Ethernet",
    877 	  WM_T_82546_3,		WMP_F_COPPER },
    878 
    879 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    880 	  "Intel i82546GB 1000BASE-X Ethernet",
    881 	  WM_T_82546_3,		WMP_F_FIBER },
    882 
    883 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    884 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    885 	  WM_T_82546_3,		WMP_F_SERDES },
    886 
    887 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    888 	  "i82546GB quad-port Gigabit Ethernet",
    889 	  WM_T_82546_3,		WMP_F_COPPER },
    890 
    891 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    892 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    893 	  WM_T_82546_3,		WMP_F_COPPER },
    894 
    895 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    896 	  "Intel PRO/1000MT (82546GB)",
    897 	  WM_T_82546_3,		WMP_F_COPPER },
    898 
    899 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    900 	  "Intel i82541EI 1000BASE-T Ethernet",
    901 	  WM_T_82541,		WMP_F_COPPER },
    902 
    903 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    904 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    905 	  WM_T_82541,		WMP_F_COPPER },
    906 
    907 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    908 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    909 	  WM_T_82541,		WMP_F_COPPER },
    910 
    911 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    912 	  "Intel i82541ER 1000BASE-T Ethernet",
    913 	  WM_T_82541_2,		WMP_F_COPPER },
    914 
    915 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    916 	  "Intel i82541GI 1000BASE-T Ethernet",
    917 	  WM_T_82541_2,		WMP_F_COPPER },
    918 
    919 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    920 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    921 	  WM_T_82541_2,		WMP_F_COPPER },
    922 
    923 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    924 	  "Intel i82541PI 1000BASE-T Ethernet",
    925 	  WM_T_82541_2,		WMP_F_COPPER },
    926 
    927 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    928 	  "Intel i82547EI 1000BASE-T Ethernet",
    929 	  WM_T_82547,		WMP_F_COPPER },
    930 
    931 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    932 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    933 	  WM_T_82547,		WMP_F_COPPER },
    934 
    935 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    936 	  "Intel i82547GI 1000BASE-T Ethernet",
    937 	  WM_T_82547_2,		WMP_F_COPPER },
    938 
    939 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    940 	  "Intel PRO/1000 PT (82571EB)",
    941 	  WM_T_82571,		WMP_F_COPPER },
    942 
    943 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    944 	  "Intel PRO/1000 PF (82571EB)",
    945 	  WM_T_82571,		WMP_F_FIBER },
    946 
    947 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    948 	  "Intel PRO/1000 PB (82571EB)",
    949 	  WM_T_82571,		WMP_F_SERDES },
    950 
    951 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    952 	  "Intel PRO/1000 QT (82571EB)",
    953 	  WM_T_82571,		WMP_F_COPPER },
    954 
    955 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    956 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    957 	  WM_T_82571,		WMP_F_COPPER, },
    958 
    959 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
    960 	  "Intel Gigabit PT Quad Port Server ExpressModule",
    961 	  WM_T_82571,		WMP_F_COPPER, },
    962 
    963 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
    964 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
    965 	  WM_T_82571,		WMP_F_SERDES, },
    966 
    967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
    968 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
    969 	  WM_T_82571,		WMP_F_SERDES, },
    970 
    971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
    972 	  "Intel 82571EB Quad 1000baseX Ethernet",
    973 	  WM_T_82571,		WMP_F_FIBER, },
    974 
    975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    976 	  "Intel i82572EI 1000baseT Ethernet",
    977 	  WM_T_82572,		WMP_F_COPPER },
    978 
    979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    980 	  "Intel i82572EI 1000baseX Ethernet",
    981 	  WM_T_82572,		WMP_F_FIBER },
    982 
    983 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    984 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    985 	  WM_T_82572,		WMP_F_SERDES },
    986 
    987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    988 	  "Intel i82572EI 1000baseT Ethernet",
    989 	  WM_T_82572,		WMP_F_COPPER },
    990 
    991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    992 	  "Intel i82573E",
    993 	  WM_T_82573,		WMP_F_COPPER },
    994 
    995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    996 	  "Intel i82573E IAMT",
    997 	  WM_T_82573,		WMP_F_COPPER },
    998 
    999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1000 	  "Intel i82573L Gigabit Ethernet",
   1001 	  WM_T_82573,		WMP_F_COPPER },
   1002 
   1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1004 	  "Intel i82574L",
   1005 	  WM_T_82574,		WMP_F_COPPER },
   1006 
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1008 	  "Intel i82574L",
   1009 	  WM_T_82574,		WMP_F_COPPER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1012 	  "Intel i82583V",
   1013 	  WM_T_82583,		WMP_F_COPPER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1016 	  "i80003 dual 1000baseT Ethernet",
   1017 	  WM_T_80003,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1020 	  "i80003 dual 1000baseX Ethernet",
   1021 	  WM_T_80003,		WMP_F_COPPER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1024 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1025 	  WM_T_80003,		WMP_F_SERDES },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1028 	  "Intel i80003 1000baseT Ethernet",
   1029 	  WM_T_80003,		WMP_F_COPPER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1032 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1033 	  WM_T_80003,		WMP_F_SERDES },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1036 	  "Intel i82801H (M_AMT) LAN Controller",
   1037 	  WM_T_ICH8,		WMP_F_COPPER },
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1039 	  "Intel i82801H (AMT) LAN Controller",
   1040 	  WM_T_ICH8,		WMP_F_COPPER },
   1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1042 	  "Intel i82801H LAN Controller",
   1043 	  WM_T_ICH8,		WMP_F_COPPER },
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1045 	  "Intel i82801H (IFE) LAN Controller",
   1046 	  WM_T_ICH8,		WMP_F_COPPER },
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1048 	  "Intel i82801H (M) LAN Controller",
   1049 	  WM_T_ICH8,		WMP_F_COPPER },
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1051 	  "Intel i82801H IFE (GT) LAN Controller",
   1052 	  WM_T_ICH8,		WMP_F_COPPER },
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1054 	  "Intel i82801H IFE (G) LAN Controller",
   1055 	  WM_T_ICH8,		WMP_F_COPPER },
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1057 	  "82801I (AMT) LAN Controller",
   1058 	  WM_T_ICH9,		WMP_F_COPPER },
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1060 	  "82801I LAN Controller",
   1061 	  WM_T_ICH9,		WMP_F_COPPER },
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1063 	  "82801I (G) LAN Controller",
   1064 	  WM_T_ICH9,		WMP_F_COPPER },
   1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1066 	  "82801I (GT) LAN Controller",
   1067 	  WM_T_ICH9,		WMP_F_COPPER },
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1069 	  "82801I (C) LAN Controller",
   1070 	  WM_T_ICH9,		WMP_F_COPPER },
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1072 	  "82801I mobile LAN Controller",
   1073 	  WM_T_ICH9,		WMP_F_COPPER },
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1075 	  "82801I mobile (V) LAN Controller",
   1076 	  WM_T_ICH9,		WMP_F_COPPER },
   1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1078 	  "82801I mobile (AMT) LAN Controller",
   1079 	  WM_T_ICH9,		WMP_F_COPPER },
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1081 	  "82567LM-4 LAN Controller",
   1082 	  WM_T_ICH9,		WMP_F_COPPER },
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1084 	  "82567V-3 LAN Controller",
   1085 	  WM_T_ICH9,		WMP_F_COPPER },
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1087 	  "82567LM-2 LAN Controller",
   1088 	  WM_T_ICH10,		WMP_F_COPPER },
   1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1090 	  "82567LF-2 LAN Controller",
   1091 	  WM_T_ICH10,		WMP_F_COPPER },
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1093 	  "82567LM-3 LAN Controller",
   1094 	  WM_T_ICH10,		WMP_F_COPPER },
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1096 	  "82567LF-3 LAN Controller",
   1097 	  WM_T_ICH10,		WMP_F_COPPER },
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1099 	  "82567V-2 LAN Controller",
   1100 	  WM_T_ICH10,		WMP_F_COPPER },
   1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1102 	  "82567V-3? LAN Controller",
   1103 	  WM_T_ICH10,		WMP_F_COPPER },
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1105 	  "HANKSVILLE LAN Controller",
   1106 	  WM_T_ICH10,		WMP_F_COPPER },
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1108 	  "PCH LAN (82577LM) Controller",
   1109 	  WM_T_PCH,		WMP_F_COPPER },
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1111 	  "PCH LAN (82577LC) Controller",
   1112 	  WM_T_PCH,		WMP_F_COPPER },
   1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1114 	  "PCH LAN (82578DM) Controller",
   1115 	  WM_T_PCH,		WMP_F_COPPER },
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1117 	  "PCH LAN (82578DC) Controller",
   1118 	  WM_T_PCH,		WMP_F_COPPER },
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1120 	  "PCH2 LAN (82579LM) Controller",
   1121 	  WM_T_PCH2,		WMP_F_COPPER },
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1123 	  "PCH2 LAN (82579V) Controller",
   1124 	  WM_T_PCH2,		WMP_F_COPPER },
   1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1126 	  "82575EB dual-1000baseT Ethernet",
   1127 	  WM_T_82575,		WMP_F_COPPER },
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1129 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1130 	  WM_T_82575,		WMP_F_SERDES },
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1132 	  "82575GB quad-1000baseT Ethernet",
   1133 	  WM_T_82575,		WMP_F_COPPER },
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1135 	  "82575GB quad-1000baseT Ethernet (PM)",
   1136 	  WM_T_82575,		WMP_F_COPPER },
   1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1138 	  "82576 1000BaseT Ethernet",
   1139 	  WM_T_82576,		WMP_F_COPPER },
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1141 	  "82576 1000BaseX Ethernet",
   1142 	  WM_T_82576,		WMP_F_FIBER },
   1143 
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1145 	  "82576 gigabit Ethernet (SERDES)",
   1146 	  WM_T_82576,		WMP_F_SERDES },
   1147 
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1149 	  "82576 quad-1000BaseT Ethernet",
   1150 	  WM_T_82576,		WMP_F_COPPER },
   1151 
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1153 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1154 	  WM_T_82576,		WMP_F_COPPER },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1157 	  "82576 gigabit Ethernet",
   1158 	  WM_T_82576,		WMP_F_COPPER },
   1159 
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1161 	  "82576 gigabit Ethernet (SERDES)",
   1162 	  WM_T_82576,		WMP_F_SERDES },
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1164 	  "82576 quad-gigabit Ethernet (SERDES)",
   1165 	  WM_T_82576,		WMP_F_SERDES },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1168 	  "82580 1000BaseT Ethernet",
   1169 	  WM_T_82580,		WMP_F_COPPER },
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1171 	  "82580 1000BaseX Ethernet",
   1172 	  WM_T_82580,		WMP_F_FIBER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1175 	  "82580 1000BaseT Ethernet (SERDES)",
   1176 	  WM_T_82580,		WMP_F_SERDES },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1179 	  "82580 gigabit Ethernet (SGMII)",
   1180 	  WM_T_82580,		WMP_F_COPPER },
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1182 	  "82580 dual-1000BaseT Ethernet",
   1183 	  WM_T_82580,		WMP_F_COPPER },
   1184 
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1186 	  "82580 quad-1000BaseX Ethernet",
   1187 	  WM_T_82580,		WMP_F_FIBER },
   1188 
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1190 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1191 	  WM_T_82580,		WMP_F_COPPER },
   1192 
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1194 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1195 	  WM_T_82580,		WMP_F_SERDES },
   1196 
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1198 	  "DH89XXCC 1000BASE-KX Ethernet",
   1199 	  WM_T_82580,		WMP_F_SERDES },
   1200 
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1202 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1203 	  WM_T_82580,		WMP_F_SERDES },
   1204 
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1206 	  "I350 Gigabit Network Connection",
   1207 	  WM_T_I350,		WMP_F_COPPER },
   1208 
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1210 	  "I350 Gigabit Fiber Network Connection",
   1211 	  WM_T_I350,		WMP_F_FIBER },
   1212 
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1214 	  "I350 Gigabit Backplane Connection",
   1215 	  WM_T_I350,		WMP_F_SERDES },
   1216 
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1218 	  "I350 Quad Port Gigabit Ethernet",
   1219 	  WM_T_I350,		WMP_F_SERDES },
   1220 
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1222 	  "I350 Gigabit Connection",
   1223 	  WM_T_I350,		WMP_F_COPPER },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1226 	  "I354 Gigabit Ethernet (KX)",
   1227 	  WM_T_I354,		WMP_F_SERDES },
   1228 
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1230 	  "I354 Gigabit Ethernet (SGMII)",
   1231 	  WM_T_I354,		WMP_F_COPPER },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1234 	  "I354 Gigabit Ethernet (2.5G)",
   1235 	  WM_T_I354,		WMP_F_COPPER },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1238 	  "I210-T1 Ethernet Server Adapter",
   1239 	  WM_T_I210,		WMP_F_COPPER },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1242 	  "I210 Ethernet (Copper OEM)",
   1243 	  WM_T_I210,		WMP_F_COPPER },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1246 	  "I210 Ethernet (Copper IT)",
   1247 	  WM_T_I210,		WMP_F_COPPER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1250 	  "I210 Ethernet (FLASH less)",
   1251 	  WM_T_I210,		WMP_F_COPPER },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1254 	  "I210 Gigabit Ethernet (Fiber)",
   1255 	  WM_T_I210,		WMP_F_FIBER },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1258 	  "I210 Gigabit Ethernet (SERDES)",
   1259 	  WM_T_I210,		WMP_F_SERDES },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1262 	  "I210 Gigabit Ethernet (FLASH less)",
   1263 	  WM_T_I210,		WMP_F_SERDES },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1266 	  "I210 Gigabit Ethernet (SGMII)",
   1267 	  WM_T_I210,		WMP_F_COPPER },
   1268 
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1270 	  "I211 Ethernet (COPPER)",
   1271 	  WM_T_I211,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1273 	  "I217 V Ethernet Connection",
   1274 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1276 	  "I217 LM Ethernet Connection",
   1277 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1279 	  "I218 V Ethernet Connection",
   1280 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1282 	  "I218 V Ethernet Connection",
   1283 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1285 	  "I218 V Ethernet Connection",
   1286 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1288 	  "I218 LM Ethernet Connection",
   1289 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1291 	  "I218 LM Ethernet Connection",
   1292 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1294 	  "I218 LM Ethernet Connection",
   1295 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1296 	{ 0,			0,
   1297 	  NULL,
   1298 	  0,			0 },
   1299 };
   1300 
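/*
 * Storage for the names of the per-segment Tx event counters
 * ("txseg0", "txseg1", ...); filled in with snprintf() at attach time.
 */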
   1301 #ifdef WM_EVENT_COUNTERS
   1302 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1303 #endif /* WM_EVENT_COUNTERS */
   1304 
   1305 
   1306 /*
   1307  * Register read/write functions.
   1308  * Other than CSR_{READ|WRITE}().
   1309  */
   1310 
   1311 #if 0 /* Not currently used */
   1312 static inline uint32_t
   1313 wm_io_read(struct wm_softc *sc, int reg)
   1314 {
   1315 
   1316 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1317 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1318 }
   1319 #endif
   1320 
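/*
 * wm_io_write:
 *
 *	Write a device register through the I/O-mapped indirect access
 *	window: the register offset goes into the address port at I/O
 *	BAR offset 0 and the value into the data port at offset 4.
 */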
   1321 static inline void
   1322 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1323 {
   1324 
   1325 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1326 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1327 }
   1328 
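/*
 * wm_82575_write_8bit_ctlr_reg:
 *
 *	Write an 8-bit value to an indirectly accessed 82575 controller
 *	register, then poll (5us per iteration, SCTL_CTL_POLL_TIMEOUT
 *	iterations at most) until the chip reports the write complete.
 */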
   1329 static inline void
   1330 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1331     uint32_t data)
   1332 {
   1333 	uint32_t regval;
   1334 	int i;
   1335 
   1336 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1337 
   1338 	CSR_WRITE(sc, reg, regval);
   1339 
   1340 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1341 		delay(5);
   1342 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1343 			break;
   1344 	}
   1345 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1346 		aprint_error("%s: WARNING:"
   1347 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1348 		    device_xname(sc->sc_dev), reg);
   1349 	}
   1350 }
   1351 
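/*
 * wm_set_dma_addr:
 *
 *	Store a bus address in the two little-endian 32-bit halves of a
 *	descriptor address field; the high half is zero unless bus
 *	addresses are 64-bit.
 */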
   1352 static inline void
   1353 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1354 {
   1355 	wa->wa_low = htole32(v & 0xffffffffU);
   1356 	if (sizeof(bus_addr_t) == 8)
   1357 		wa->wa_high = htole32((uint64_t) v >> 32);
   1358 	else
   1359 		wa->wa_high = 0;
   1360 }
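
#if 0	/* Usage sketch only; not compiled. */
/*
 * A minimal example of pointing a legacy Tx descriptor at a DMA
 * segment with wm_set_dma_addr().  The helper name and the
 * txq_descs/wtx_addr member names are illustrative and may not match
 * the exact fields used by the transmit path in this file.
 */
static inline void
wm_example_load_txdesc(struct wm_txqueue *txq, int nexttx,
    const bus_dma_segment_t *seg)
{

	/* Split seg->ds_addr into the descriptor's low/high words. */
	wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr, seg->ds_addr);
}
#endif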
   1361 
   1362 /*
   1363  * Descriptor sync/init functions.
   1364  */
   1365 static inline void
   1366 wm_cdtxsync(struct wm_softc *sc, int start, int num, int ops)
   1367 {
   1368 	struct wm_txqueue *txq = sc->sc_txq;
   1369 
   1370 	/* If it will wrap around, sync to the end of the ring. */
   1371 	if ((start + num) > WM_NTXDESC(txq)) {
   1372 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1373 		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
   1374 		    (WM_NTXDESC(txq) - start), ops);
   1375 		num -= (WM_NTXDESC(txq) - start);
   1376 		start = 0;
   1377 	}
   1378 
   1379 	/* Now sync whatever is left. */
   1380 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1381 	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
   1382 }
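
/*
 * Example of the wrap-around handling above: with, say, a
 * 256-descriptor ring, wm_cdtxsync(sc, 250, 10, ops) syncs descriptors
 * 250-255 in the first bus_dmamap_sync() call and descriptors 0-3 in
 * the second.
 */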
   1383 
   1384 static inline void
   1385 wm_cdrxsync(struct wm_softc *sc, int start, int ops)
   1386 {
   1387 	struct wm_rxqueue *rxq = sc->sc_rxq;
   1388 
   1389 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1390 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1391 }
   1392 
   1393 static inline void
   1394 wm_init_rxdesc(struct wm_softc *sc, int start)
   1395 {
   1396 	struct wm_rxqueue *rxq = sc->sc_rxq;
   1397 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1398 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1399 	struct mbuf *m = rxs->rxs_mbuf;
   1400 
    1401 	/*
    1402 	 * Note: We scoot the packet forward 2 bytes in the buffer
    1403 	 * so that the payload after the 14-byte Ethernet header is
    1404 	 * aligned to a 4-byte boundary (2 + 14 = 16).
    1405 	 *
    1406 	 * XXX BRAINDAMAGE ALERT!
    1407 	 * The stupid chip uses the same size for every buffer, which
    1408 	 * is set in the Receive Control register.  We are using the 2K
    1409 	 * size option, but what we REALLY want is (2K - 2)!  For this
    1410 	 * reason, we can't "scoot" packets longer than the standard
    1411 	 * Ethernet MTU.  On strict-alignment platforms, if the total
    1412 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
    1413 	 * the upper layer copy the headers.
    1414 	 */
   1415 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1416 
   1417 	wm_set_dma_addr(&rxd->wrx_addr,
   1418 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1419 	rxd->wrx_len = 0;
   1420 	rxd->wrx_cksum = 0;
   1421 	rxd->wrx_status = 0;
   1422 	rxd->wrx_errors = 0;
   1423 	rxd->wrx_special = 0;
   1424 	wm_cdrxsync(sc, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1425 
   1426 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1427 }
   1428 
   1429 /*
   1430  * Device driver interface functions and commonly used functions.
   1431  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1432  */
   1433 
    1434 /* Look up a device in the table of supported devices */
   1435 static const struct wm_product *
   1436 wm_lookup(const struct pci_attach_args *pa)
   1437 {
   1438 	const struct wm_product *wmp;
   1439 
   1440 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1441 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1442 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1443 			return wmp;
   1444 	}
   1445 	return NULL;
   1446 }
   1447 
   1448 /* The match function (ca_match) */
   1449 static int
   1450 wm_match(device_t parent, cfdata_t cf, void *aux)
   1451 {
   1452 	struct pci_attach_args *pa = aux;
   1453 
   1454 	if (wm_lookup(pa) != NULL)
   1455 		return 1;
   1456 
   1457 	return 0;
   1458 }
   1459 
   1460 /* The attach function (ca_attach) */
   1461 static void
   1462 wm_attach(device_t parent, device_t self, void *aux)
   1463 {
   1464 	struct wm_softc *sc = device_private(self);
   1465 	struct pci_attach_args *pa = aux;
   1466 	prop_dictionary_t dict;
   1467 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1468 	pci_chipset_tag_t pc = pa->pa_pc;
   1469 #ifndef WM_MSI_MSIX
   1470 	pci_intr_handle_t ih;
   1471 #else
   1472 	int counts[PCI_INTR_TYPE_SIZE];
   1473 	pci_intr_type_t max_type;
   1474 #endif
   1475 	const char *intrstr = NULL;
   1476 	const char *eetype, *xname;
   1477 	bus_space_tag_t memt;
   1478 	bus_space_handle_t memh;
   1479 	bus_size_t memsize;
   1480 	int memh_valid;
   1481 	int i, error;
   1482 	const struct wm_product *wmp;
   1483 	prop_data_t ea;
   1484 	prop_number_t pn;
   1485 	uint8_t enaddr[ETHER_ADDR_LEN];
   1486 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1487 	pcireg_t preg, memtype;
   1488 	uint16_t eeprom_data, apme_mask;
   1489 	bool force_clear_smbi;
   1490 	uint32_t link_mode;
   1491 	uint32_t reg;
   1492 	char intrbuf[PCI_INTRSTR_LEN];
   1493 
   1494 	sc->sc_dev = self;
   1495 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1496 	sc->sc_stopping = false;
   1497 
   1498 	wmp = wm_lookup(pa);
   1499 #ifdef DIAGNOSTIC
   1500 	if (wmp == NULL) {
   1501 		printf("\n");
   1502 		panic("wm_attach: impossible");
   1503 	}
   1504 #endif
   1505 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1506 
   1507 	sc->sc_pc = pa->pa_pc;
   1508 	sc->sc_pcitag = pa->pa_tag;
   1509 
   1510 	if (pci_dma64_available(pa))
   1511 		sc->sc_dmat = pa->pa_dmat64;
   1512 	else
   1513 		sc->sc_dmat = pa->pa_dmat;
   1514 
   1515 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1516 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1517 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1518 
   1519 	sc->sc_type = wmp->wmp_type;
   1520 	if (sc->sc_type < WM_T_82543) {
   1521 		if (sc->sc_rev < 2) {
   1522 			aprint_error_dev(sc->sc_dev,
   1523 			    "i82542 must be at least rev. 2\n");
   1524 			return;
   1525 		}
   1526 		if (sc->sc_rev < 3)
   1527 			sc->sc_type = WM_T_82542_2_0;
   1528 	}
   1529 
   1530 	/*
   1531 	 * Disable MSI for Errata:
   1532 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1533 	 *
   1534 	 *  82544: Errata 25
   1535 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1536 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1537 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1538 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1539 	 *
   1540 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1541 	 *
   1542 	 *  82571 & 82572: Errata 63
   1543 	 */
   1544 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1545 	    || (sc->sc_type == WM_T_82572))
   1546 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1547 
   1548 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1549 	    || (sc->sc_type == WM_T_82580)
   1550 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1551 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1552 		sc->sc_flags |= WM_F_NEWQUEUE;
   1553 
   1554 	/* Set device properties (mactype) */
   1555 	dict = device_properties(sc->sc_dev);
   1556 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1557 
   1558 	/*
    1559 	 * Map the device.  All devices support memory-mapped access,
   1560 	 * and it is really required for normal operation.
   1561 	 */
   1562 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1563 	switch (memtype) {
   1564 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1565 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1566 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1567 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1568 		break;
   1569 	default:
   1570 		memh_valid = 0;
   1571 		break;
   1572 	}
   1573 
   1574 	if (memh_valid) {
   1575 		sc->sc_st = memt;
   1576 		sc->sc_sh = memh;
   1577 		sc->sc_ss = memsize;
   1578 	} else {
   1579 		aprint_error_dev(sc->sc_dev,
   1580 		    "unable to map device registers\n");
   1581 		return;
   1582 	}
   1583 
   1584 	/*
   1585 	 * In addition, i82544 and later support I/O mapped indirect
   1586 	 * register access.  It is not desirable (nor supported in
   1587 	 * this driver) to use it for normal operation, though it is
   1588 	 * required to work around bugs in some chip versions.
   1589 	 */
   1590 	if (sc->sc_type >= WM_T_82544) {
   1591 		/* First we have to find the I/O BAR. */
   1592 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1593 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1594 			if (memtype == PCI_MAPREG_TYPE_IO)
   1595 				break;
   1596 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1597 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1598 				i += 4;	/* skip high bits, too */
   1599 		}
   1600 		if (i < PCI_MAPREG_END) {
    1601 			/*
    1602 			 * We found PCI_MAPREG_TYPE_IO.  Note that the
    1603 			 * 82580 (and newer?) chips have no I/O BAR at
    1604 			 * all; that's no problem, because the newer
    1605 			 * chips don't have the bug this mapping works
    1606 			 * around.
    1607 			 *
    1608 			 * The i8254x apparently doesn't respond when the
    1609 			 * I/O BAR is 0, which suggests it hasn't been
    1610 			 * configured.
    1611 			 */
   1611 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1612 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1613 				aprint_error_dev(sc->sc_dev,
   1614 				    "WARNING: I/O BAR at zero.\n");
   1615 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1616 					0, &sc->sc_iot, &sc->sc_ioh,
   1617 					NULL, &sc->sc_ios) == 0) {
   1618 				sc->sc_flags |= WM_F_IOH_VALID;
   1619 			} else {
   1620 				aprint_error_dev(sc->sc_dev,
   1621 				    "WARNING: unable to map I/O space\n");
   1622 			}
   1623 		}
   1624 
   1625 	}
   1626 
   1627 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1628 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1629 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1630 	if (sc->sc_type < WM_T_82542_2_1)
   1631 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1632 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1633 
   1634 	/* power up chip */
   1635 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1636 	    NULL)) && error != EOPNOTSUPP) {
   1637 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1638 		return;
   1639 	}
   1640 
   1641 #ifndef WM_MSI_MSIX
   1642 	/*
   1643 	 * Map and establish our interrupt.
   1644 	 */
   1645 	if (pci_intr_map(pa, &ih)) {
   1646 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
   1647 		return;
   1648 	}
   1649 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
   1650 #ifdef WM_MPSAFE
   1651 	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
   1652 #endif
   1653 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, ih, IPL_NET,
   1654 	    wm_intr_legacy, sc, device_xname(sc->sc_dev));
   1655 	if (sc->sc_ihs[0] == NULL) {
   1656 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
   1657 		if (intrstr != NULL)
   1658 			aprint_error(" at %s", intrstr);
   1659 		aprint_error("\n");
   1660 		return;
   1661 	}
   1662 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   1663 	sc->sc_nintrs = 1;
   1664 #else /* WM_MSI_MSIX */
   1665 	/* Allocation settings */
   1666 	max_type = PCI_INTR_TYPE_MSIX;
   1667 	counts[PCI_INTR_TYPE_MSIX] = WM_MAX_NINTR;
   1668 	counts[PCI_INTR_TYPE_MSI] = 1;
   1669 	counts[PCI_INTR_TYPE_INTX] = 1;
   1670 
   1671 alloc_retry:
   1672 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1673 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1674 		return;
   1675 	}
   1676 
   1677 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1678 		void *vih;
   1679 		kcpuset_t *affinity;
   1680 		char intr_xname[INTRDEVNAMEBUF];
   1681 
   1682 		kcpuset_create(&affinity, false);
   1683 
   1684 		for (i = 0; i < WM_MSIX_NINTR; i++) {
   1685 			intrstr = pci_intr_string(pc,
   1686 			    sc->sc_intrs[msix_matrix[i].intridx], intrbuf,
   1687 			    sizeof(intrbuf));
   1688 #ifdef WM_MPSAFE
   1689 			pci_intr_setattr(pc,
   1690 			    &sc->sc_intrs[msix_matrix[i].intridx],
   1691 			    PCI_INTR_MPSAFE, true);
   1692 #endif
   1693 			memset(intr_xname, 0, sizeof(intr_xname));
   1694 			strlcat(intr_xname, device_xname(sc->sc_dev),
   1695 			    sizeof(intr_xname));
   1696 			strlcat(intr_xname, msix_matrix[i].intrname,
   1697 			    sizeof(intr_xname));
   1698 			vih = pci_intr_establish_xname(pc,
   1699 			    sc->sc_intrs[msix_matrix[i].intridx], IPL_NET,
   1700 			    msix_matrix[i].func, sc, intr_xname);
   1701 			if (vih == NULL) {
   1702 				aprint_error_dev(sc->sc_dev,
   1703 				    "unable to establish MSI-X(for %s)%s%s\n",
   1704 				    msix_matrix[i].intrname,
   1705 				    intrstr ? " at " : "",
   1706 				    intrstr ? intrstr : "");
   1707 				pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1708 				    WM_MSIX_NINTR);
   1709 				kcpuset_destroy(affinity);
   1710 
   1711 				/* Setup for MSI: Disable MSI-X */
   1712 				max_type = PCI_INTR_TYPE_MSI;
   1713 				counts[PCI_INTR_TYPE_MSI] = 1;
   1714 				counts[PCI_INTR_TYPE_INTX] = 1;
   1715 				goto alloc_retry;
   1716 			}
   1717 			kcpuset_zero(affinity);
   1718 			/* Round-robin affinity */
   1719 			kcpuset_set(affinity, msix_matrix[i].cpuid % ncpu);
   1720 			error = interrupt_distribute(vih, affinity, NULL);
   1721 			if (error == 0) {
   1722 				aprint_normal_dev(sc->sc_dev,
   1723 				    "for %s interrupting at %s affinity to %u\n",
   1724 				    msix_matrix[i].intrname, intrstr,
   1725 				    msix_matrix[i].cpuid % ncpu);
   1726 			} else {
   1727 				aprint_normal_dev(sc->sc_dev,
   1728 				    "for %s interrupting at %s\n",
   1729 				    msix_matrix[i].intrname, intrstr);
   1730 			}
   1731 			sc->sc_ihs[msix_matrix[i].intridx] = vih;
   1732 		}
   1733 
   1734 		sc->sc_nintrs = WM_MSIX_NINTR;
   1735 		kcpuset_destroy(affinity);
   1736 	} else {
   1737 		/* MSI or INTx */
   1738 		intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   1739 		    sizeof(intrbuf));
   1740 #ifdef WM_MPSAFE
   1741 		pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   1742 #endif
   1743 		sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   1744 		    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   1745 		if (sc->sc_ihs[0] == NULL) {
   1746 			aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   1747 			    (pci_intr_type(sc->sc_intrs[0])
   1748 				== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   1749 			pci_intr_release(sc->sc_pc, sc->sc_intrs, 1);
   1750 			switch (pci_intr_type(sc->sc_intrs[0])) {
   1751 			case PCI_INTR_TYPE_MSI:
   1752 				/* The next try is for INTx: Disable MSI */
   1753 				max_type = PCI_INTR_TYPE_INTX;
   1754 				counts[PCI_INTR_TYPE_INTX] = 1;
   1755 				goto alloc_retry;
   1756 			case PCI_INTR_TYPE_INTX:
   1757 			default:
   1758 				return;
   1759 			}
   1760 		}
   1761 		aprint_normal_dev(sc->sc_dev, "%s at %s\n",
   1762 		    (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI)
   1763 			? "MSI" : "interrupting", intrstr);
   1764 
   1765 		sc->sc_nintrs = 1;
   1766 	}
   1767 #endif /* WM_MSI_MSIX */
   1768 
   1769 	/*
   1770 	 * Check the function ID (unit number of the chip).
   1771 	 */
   1772 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1773 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1774 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1775 	    || (sc->sc_type == WM_T_82580)
   1776 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1777 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1778 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1779 	else
   1780 		sc->sc_funcid = 0;
   1781 
   1782 	/*
   1783 	 * Determine a few things about the bus we're connected to.
   1784 	 */
   1785 	if (sc->sc_type < WM_T_82543) {
   1786 		/* We don't really know the bus characteristics here. */
   1787 		sc->sc_bus_speed = 33;
   1788 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1789 		/*
    1790 		 * CSA (Communication Streaming Architecture) is about as
    1791 		 * fast as a 32-bit 66MHz PCI bus.
   1792 		 */
   1793 		sc->sc_flags |= WM_F_CSA;
   1794 		sc->sc_bus_speed = 66;
   1795 		aprint_verbose_dev(sc->sc_dev,
   1796 		    "Communication Streaming Architecture\n");
   1797 		if (sc->sc_type == WM_T_82547) {
   1798 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1799 			callout_setfunc(&sc->sc_txfifo_ch,
   1800 					wm_82547_txfifo_stall, sc);
   1801 			aprint_verbose_dev(sc->sc_dev,
   1802 			    "using 82547 Tx FIFO stall work-around\n");
   1803 		}
   1804 	} else if (sc->sc_type >= WM_T_82571) {
   1805 		sc->sc_flags |= WM_F_PCIE;
   1806 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1807 		    && (sc->sc_type != WM_T_ICH10)
   1808 		    && (sc->sc_type != WM_T_PCH)
   1809 		    && (sc->sc_type != WM_T_PCH2)
   1810 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1811 			/* ICH* and PCH* have no PCIe capability registers */
   1812 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1813 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1814 				NULL) == 0)
   1815 				aprint_error_dev(sc->sc_dev,
   1816 				    "unable to find PCIe capability\n");
   1817 		}
   1818 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1819 	} else {
   1820 		reg = CSR_READ(sc, WMREG_STATUS);
   1821 		if (reg & STATUS_BUS64)
   1822 			sc->sc_flags |= WM_F_BUS64;
   1823 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1824 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1825 
   1826 			sc->sc_flags |= WM_F_PCIX;
   1827 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1828 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1829 				aprint_error_dev(sc->sc_dev,
   1830 				    "unable to find PCIX capability\n");
   1831 			else if (sc->sc_type != WM_T_82545_3 &&
   1832 				 sc->sc_type != WM_T_82546_3) {
   1833 				/*
   1834 				 * Work around a problem caused by the BIOS
   1835 				 * setting the max memory read byte count
   1836 				 * incorrectly.
   1837 				 */
   1838 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1839 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1840 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1841 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1842 
   1843 				bytecnt =
   1844 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1845 				    PCIX_CMD_BYTECNT_SHIFT;
   1846 				maxb =
   1847 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1848 				    PCIX_STATUS_MAXB_SHIFT;
   1849 				if (bytecnt > maxb) {
   1850 					aprint_verbose_dev(sc->sc_dev,
   1851 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1852 					    512 << bytecnt, 512 << maxb);
   1853 					pcix_cmd = (pcix_cmd &
   1854 					    ~PCIX_CMD_BYTECNT_MASK) |
   1855 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1856 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1857 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1858 					    pcix_cmd);
   1859 				}
   1860 			}
   1861 		}
   1862 		/*
   1863 		 * The quad port adapter is special; it has a PCIX-PCIX
   1864 		 * bridge on the board, and can run the secondary bus at
   1865 		 * a higher speed.
   1866 		 */
   1867 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1868 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1869 								      : 66;
   1870 		} else if (sc->sc_flags & WM_F_PCIX) {
   1871 			switch (reg & STATUS_PCIXSPD_MASK) {
   1872 			case STATUS_PCIXSPD_50_66:
   1873 				sc->sc_bus_speed = 66;
   1874 				break;
   1875 			case STATUS_PCIXSPD_66_100:
   1876 				sc->sc_bus_speed = 100;
   1877 				break;
   1878 			case STATUS_PCIXSPD_100_133:
   1879 				sc->sc_bus_speed = 133;
   1880 				break;
   1881 			default:
   1882 				aprint_error_dev(sc->sc_dev,
   1883 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1884 				    reg & STATUS_PCIXSPD_MASK);
   1885 				sc->sc_bus_speed = 66;
   1886 				break;
   1887 			}
   1888 		} else
   1889 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1890 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1891 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1892 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1893 	}
   1894 
    1895 	/* XXX Currently there is always one Tx queue and one Rx queue. */
   1896 	sc->sc_nrxqueues = 1;
   1897 	sc->sc_ntxqueues = 1;
   1898 	error = wm_alloc_txrx_queues(sc);
   1899 	if (error)
   1900 		return;
   1901 
   1902 	/* clear interesting stat counters */
   1903 	CSR_READ(sc, WMREG_COLC);
   1904 	CSR_READ(sc, WMREG_RXERRC);
   1905 
   1906 	/* get PHY control from SMBus to PCIe */
   1907 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1908 	    || (sc->sc_type == WM_T_PCH_LPT))
   1909 		wm_smbustopci(sc);
   1910 
   1911 	/* Reset the chip to a known state. */
   1912 	wm_reset(sc);
   1913 
   1914 	/* Get some information about the EEPROM. */
   1915 	switch (sc->sc_type) {
   1916 	case WM_T_82542_2_0:
   1917 	case WM_T_82542_2_1:
   1918 	case WM_T_82543:
   1919 	case WM_T_82544:
   1920 		/* Microwire */
   1921 		sc->sc_nvm_wordsize = 64;
   1922 		sc->sc_nvm_addrbits = 6;
   1923 		break;
   1924 	case WM_T_82540:
   1925 	case WM_T_82545:
   1926 	case WM_T_82545_3:
   1927 	case WM_T_82546:
   1928 	case WM_T_82546_3:
   1929 		/* Microwire */
   1930 		reg = CSR_READ(sc, WMREG_EECD);
   1931 		if (reg & EECD_EE_SIZE) {
   1932 			sc->sc_nvm_wordsize = 256;
   1933 			sc->sc_nvm_addrbits = 8;
   1934 		} else {
   1935 			sc->sc_nvm_wordsize = 64;
   1936 			sc->sc_nvm_addrbits = 6;
   1937 		}
   1938 		sc->sc_flags |= WM_F_LOCK_EECD;
   1939 		break;
   1940 	case WM_T_82541:
   1941 	case WM_T_82541_2:
   1942 	case WM_T_82547:
   1943 	case WM_T_82547_2:
   1944 		sc->sc_flags |= WM_F_LOCK_EECD;
   1945 		reg = CSR_READ(sc, WMREG_EECD);
   1946 		if (reg & EECD_EE_TYPE) {
   1947 			/* SPI */
   1948 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1949 			wm_nvm_set_addrbits_size_eecd(sc);
   1950 		} else {
   1951 			/* Microwire */
   1952 			if ((reg & EECD_EE_ABITS) != 0) {
   1953 				sc->sc_nvm_wordsize = 256;
   1954 				sc->sc_nvm_addrbits = 8;
   1955 			} else {
   1956 				sc->sc_nvm_wordsize = 64;
   1957 				sc->sc_nvm_addrbits = 6;
   1958 			}
   1959 		}
   1960 		break;
   1961 	case WM_T_82571:
   1962 	case WM_T_82572:
   1963 		/* SPI */
   1964 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1965 		wm_nvm_set_addrbits_size_eecd(sc);
   1966 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1967 		break;
   1968 	case WM_T_82573:
   1969 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1970 		/* FALLTHROUGH */
   1971 	case WM_T_82574:
   1972 	case WM_T_82583:
   1973 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1974 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1975 			sc->sc_nvm_wordsize = 2048;
   1976 		} else {
   1977 			/* SPI */
   1978 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1979 			wm_nvm_set_addrbits_size_eecd(sc);
   1980 		}
   1981 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1982 		break;
   1983 	case WM_T_82575:
   1984 	case WM_T_82576:
   1985 	case WM_T_82580:
   1986 	case WM_T_I350:
   1987 	case WM_T_I354:
   1988 	case WM_T_80003:
   1989 		/* SPI */
   1990 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1991 		wm_nvm_set_addrbits_size_eecd(sc);
   1992 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1993 		    | WM_F_LOCK_SWSM;
   1994 		break;
   1995 	case WM_T_ICH8:
   1996 	case WM_T_ICH9:
   1997 	case WM_T_ICH10:
   1998 	case WM_T_PCH:
   1999 	case WM_T_PCH2:
   2000 	case WM_T_PCH_LPT:
   2001 		/* FLASH */
   2002 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2003 		sc->sc_nvm_wordsize = 2048;
   2004 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   2005 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2006 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2007 			aprint_error_dev(sc->sc_dev,
   2008 			    "can't map FLASH registers\n");
   2009 			goto out;
   2010 		}
   2011 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
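		/*
		 * GFPREG holds the first and the last sectors of the
		 * flash region (masked by ICH_GFPREG_BASE_MASK) in its
		 * low and high halves.  Convert those to a byte offset
		 * and a size; the final division expresses the size in
		 * 16-bit words and halves it, since the region contains
		 * two NVM banks.
		 */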
   2012 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2013 						ICH_FLASH_SECTOR_SIZE;
   2014 		sc->sc_ich8_flash_bank_size =
   2015 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2016 		sc->sc_ich8_flash_bank_size -=
   2017 		    (reg & ICH_GFPREG_BASE_MASK);
   2018 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2019 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2020 		break;
   2021 	case WM_T_I210:
   2022 	case WM_T_I211:
   2023 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2024 			wm_nvm_set_addrbits_size_eecd(sc);
   2025 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2026 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   2027 		} else {
   2028 			sc->sc_nvm_wordsize = INVM_SIZE;
   2029 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2030 			sc->sc_flags |= WM_F_LOCK_SWFW;
   2031 		}
   2032 		break;
   2033 	default:
   2034 		break;
   2035 	}
   2036 
   2037 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2038 	switch (sc->sc_type) {
   2039 	case WM_T_82571:
   2040 	case WM_T_82572:
   2041 		reg = CSR_READ(sc, WMREG_SWSM2);
   2042 		if ((reg & SWSM2_LOCK) == 0) {
   2043 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2044 			force_clear_smbi = true;
   2045 		} else
   2046 			force_clear_smbi = false;
   2047 		break;
   2048 	case WM_T_82573:
   2049 	case WM_T_82574:
   2050 	case WM_T_82583:
   2051 		force_clear_smbi = true;
   2052 		break;
   2053 	default:
   2054 		force_clear_smbi = false;
   2055 		break;
   2056 	}
   2057 	if (force_clear_smbi) {
   2058 		reg = CSR_READ(sc, WMREG_SWSM);
   2059 		if ((reg & SWSM_SMBI) != 0)
   2060 			aprint_error_dev(sc->sc_dev,
   2061 			    "Please update the Bootagent\n");
   2062 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2063 	}
   2064 
   2065 	/*
    2066 	 * Defer printing the EEPROM type until after verifying the
    2067 	 * checksum.  This allows the EEPROM type to be printed correctly
    2068 	 * in the case that no EEPROM is attached.
   2069 	 */
   2070 	/*
   2071 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2072 	 * this for later, so we can fail future reads from the EEPROM.
   2073 	 */
   2074 	if (wm_nvm_validate_checksum(sc)) {
   2075 		/*
   2076 		 * Read twice again because some PCI-e parts fail the
   2077 		 * first check due to the link being in sleep state.
   2078 		 */
   2079 		if (wm_nvm_validate_checksum(sc))
   2080 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2081 	}
   2082 
   2083 	/* Set device properties (macflags) */
   2084 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2085 
   2086 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2087 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2088 	else {
   2089 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2090 		    sc->sc_nvm_wordsize);
   2091 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2092 			aprint_verbose("iNVM");
   2093 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2094 			aprint_verbose("FLASH(HW)");
   2095 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2096 			aprint_verbose("FLASH");
   2097 		else {
   2098 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2099 				eetype = "SPI";
   2100 			else
   2101 				eetype = "MicroWire";
   2102 			aprint_verbose("(%d address bits) %s EEPROM",
   2103 			    sc->sc_nvm_addrbits, eetype);
   2104 		}
   2105 	}
   2106 	wm_nvm_version(sc);
   2107 	aprint_verbose("\n");
   2108 
   2109 	/* Check for I21[01] PLL workaround */
   2110 	if (sc->sc_type == WM_T_I210)
   2111 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2112 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2113 		/* NVM image release 3.25 has a workaround */
   2114 		if ((sc->sc_nvm_ver_major < 3)
   2115 		    || ((sc->sc_nvm_ver_major == 3)
   2116 			&& (sc->sc_nvm_ver_minor < 25))) {
   2117 			aprint_verbose_dev(sc->sc_dev,
   2118 			    "ROM image version %d.%d is older than 3.25\n",
   2119 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2120 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2121 		}
   2122 	}
   2123 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2124 		wm_pll_workaround_i210(sc);
   2125 
   2126 	switch (sc->sc_type) {
   2127 	case WM_T_82571:
   2128 	case WM_T_82572:
   2129 	case WM_T_82573:
   2130 	case WM_T_82574:
   2131 	case WM_T_82583:
   2132 	case WM_T_80003:
   2133 	case WM_T_ICH8:
   2134 	case WM_T_ICH9:
   2135 	case WM_T_ICH10:
   2136 	case WM_T_PCH:
   2137 	case WM_T_PCH2:
   2138 	case WM_T_PCH_LPT:
   2139 		if (wm_check_mng_mode(sc) != 0)
   2140 			wm_get_hw_control(sc);
   2141 		break;
   2142 	default:
   2143 		break;
   2144 	}
   2145 	wm_get_wakeup(sc);
   2146 	/*
    2147 	 * Read the Ethernet address from the EEPROM, unless it was
    2148 	 * found first in the device properties.
   2149 	 */
   2150 	ea = prop_dictionary_get(dict, "mac-address");
   2151 	if (ea != NULL) {
   2152 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2153 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2154 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2155 	} else {
   2156 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2157 			aprint_error_dev(sc->sc_dev,
   2158 			    "unable to read Ethernet address\n");
   2159 			goto out;
   2160 		}
   2161 	}
   2162 
   2163 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2164 	    ether_sprintf(enaddr));
   2165 
   2166 	/*
   2167 	 * Read the config info from the EEPROM, and set up various
   2168 	 * bits in the control registers based on their contents.
   2169 	 */
   2170 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2171 	if (pn != NULL) {
   2172 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2173 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2174 	} else {
   2175 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2176 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2177 			goto out;
   2178 		}
   2179 	}
   2180 
   2181 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2182 	if (pn != NULL) {
   2183 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2184 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2185 	} else {
   2186 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2187 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2188 			goto out;
   2189 		}
   2190 	}
   2191 
   2192 	/* check for WM_F_WOL */
   2193 	switch (sc->sc_type) {
   2194 	case WM_T_82542_2_0:
   2195 	case WM_T_82542_2_1:
   2196 	case WM_T_82543:
   2197 		/* dummy? */
   2198 		eeprom_data = 0;
   2199 		apme_mask = NVM_CFG3_APME;
   2200 		break;
   2201 	case WM_T_82544:
   2202 		apme_mask = NVM_CFG2_82544_APM_EN;
   2203 		eeprom_data = cfg2;
   2204 		break;
   2205 	case WM_T_82546:
   2206 	case WM_T_82546_3:
   2207 	case WM_T_82571:
   2208 	case WM_T_82572:
   2209 	case WM_T_82573:
   2210 	case WM_T_82574:
   2211 	case WM_T_82583:
   2212 	case WM_T_80003:
   2213 	default:
   2214 		apme_mask = NVM_CFG3_APME;
   2215 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2216 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2217 		break;
   2218 	case WM_T_82575:
   2219 	case WM_T_82576:
   2220 	case WM_T_82580:
   2221 	case WM_T_I350:
   2222 	case WM_T_I354: /* XXX ok? */
   2223 	case WM_T_ICH8:
   2224 	case WM_T_ICH9:
   2225 	case WM_T_ICH10:
   2226 	case WM_T_PCH:
   2227 	case WM_T_PCH2:
   2228 	case WM_T_PCH_LPT:
   2229 		/* XXX The funcid should be checked on some devices */
   2230 		apme_mask = WUC_APME;
   2231 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2232 		break;
   2233 	}
   2234 
   2235 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2236 	if ((eeprom_data & apme_mask) != 0)
   2237 		sc->sc_flags |= WM_F_WOL;
   2238 #ifdef WM_DEBUG
   2239 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2240 		printf("WOL\n");
   2241 #endif
   2242 
   2243 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2244 		/* Check NVM for autonegotiation */
   2245 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2246 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2247 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2248 		}
   2249 	}
   2250 
   2251 	/*
    2252 	 * XXX need special handling for some multiple-port cards
    2253 	 * to disable a particular port.
   2254 	 */
   2255 
   2256 	if (sc->sc_type >= WM_T_82544) {
   2257 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2258 		if (pn != NULL) {
   2259 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2260 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2261 		} else {
   2262 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2263 				aprint_error_dev(sc->sc_dev,
   2264 				    "unable to read SWDPIN\n");
   2265 				goto out;
   2266 			}
   2267 		}
   2268 	}
   2269 
   2270 	if (cfg1 & NVM_CFG1_ILOS)
   2271 		sc->sc_ctrl |= CTRL_ILOS;
   2272 
   2273 	/*
   2274 	 * XXX
    2275 	 * This code isn't correct because pins 2 and 3 are located in
    2276 	 * different positions on newer chips.  Check all the datasheets.
    2277 	 *
    2278 	 * Until this is resolved, only handle the 82580 and older chips.
   2279 	 */
   2280 	if (sc->sc_type <= WM_T_82580) {
   2281 		if (sc->sc_type >= WM_T_82544) {
   2282 			sc->sc_ctrl |=
   2283 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2284 			    CTRL_SWDPIO_SHIFT;
   2285 			sc->sc_ctrl |=
   2286 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2287 			    CTRL_SWDPINS_SHIFT;
   2288 		} else {
   2289 			sc->sc_ctrl |=
   2290 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2291 			    CTRL_SWDPIO_SHIFT;
   2292 		}
   2293 	}
   2294 
   2295 	/* XXX For other than 82580? */
   2296 	if (sc->sc_type == WM_T_82580) {
   2297 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2298 		printf("CFG3 = %08x\n", (uint32_t)nvmword);
   2299 		if (nvmword & __BIT(13)) {
   2300 			printf("SET ILOS\n");
   2301 			sc->sc_ctrl |= CTRL_ILOS;
   2302 		}
   2303 	}
   2304 
   2305 #if 0
   2306 	if (sc->sc_type >= WM_T_82544) {
   2307 		if (cfg1 & NVM_CFG1_IPS0)
   2308 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2309 		if (cfg1 & NVM_CFG1_IPS1)
   2310 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2311 		sc->sc_ctrl_ext |=
   2312 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2313 		    CTRL_EXT_SWDPIO_SHIFT;
   2314 		sc->sc_ctrl_ext |=
   2315 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2316 		    CTRL_EXT_SWDPINS_SHIFT;
   2317 	} else {
   2318 		sc->sc_ctrl_ext |=
   2319 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2320 		    CTRL_EXT_SWDPIO_SHIFT;
   2321 	}
   2322 #endif
   2323 
   2324 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2325 #if 0
   2326 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2327 #endif
   2328 
   2329 	if (sc->sc_type == WM_T_PCH) {
   2330 		uint16_t val;
   2331 
   2332 		/* Save the NVM K1 bit setting */
   2333 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2334 
   2335 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2336 			sc->sc_nvm_k1_enabled = 1;
   2337 		else
   2338 			sc->sc_nvm_k1_enabled = 0;
   2339 	}
   2340 
   2341 	/*
    2342 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2343 	 * media structures accordingly.
   2344 	 */
   2345 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2346 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2347 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2348 	    || sc->sc_type == WM_T_82573
   2349 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2350 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2351 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2352 	} else if (sc->sc_type < WM_T_82543 ||
   2353 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2354 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2355 			aprint_error_dev(sc->sc_dev,
   2356 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2357 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2358 		}
   2359 		wm_tbi_mediainit(sc);
   2360 	} else {
   2361 		switch (sc->sc_type) {
   2362 		case WM_T_82575:
   2363 		case WM_T_82576:
   2364 		case WM_T_82580:
   2365 		case WM_T_I350:
   2366 		case WM_T_I354:
   2367 		case WM_T_I210:
   2368 		case WM_T_I211:
   2369 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2370 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2371 			switch (link_mode) {
   2372 			case CTRL_EXT_LINK_MODE_1000KX:
   2373 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2374 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2375 				break;
   2376 			case CTRL_EXT_LINK_MODE_SGMII:
   2377 				if (wm_sgmii_uses_mdio(sc)) {
   2378 					aprint_verbose_dev(sc->sc_dev,
   2379 					    "SGMII(MDIO)\n");
   2380 					sc->sc_flags |= WM_F_SGMII;
   2381 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2382 					break;
   2383 				}
   2384 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2385 				/*FALLTHROUGH*/
   2386 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2387 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2388 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2389 					if (link_mode
   2390 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2391 						sc->sc_mediatype
   2392 						    = WM_MEDIATYPE_COPPER;
   2393 						sc->sc_flags |= WM_F_SGMII;
   2394 					} else {
   2395 						sc->sc_mediatype
   2396 						    = WM_MEDIATYPE_SERDES;
   2397 						aprint_verbose_dev(sc->sc_dev,
   2398 						    "SERDES\n");
   2399 					}
   2400 					break;
   2401 				}
   2402 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2403 					aprint_verbose_dev(sc->sc_dev,
   2404 					    "SERDES\n");
   2405 
   2406 				/* Change current link mode setting */
   2407 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2408 				switch (sc->sc_mediatype) {
   2409 				case WM_MEDIATYPE_COPPER:
   2410 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2411 					break;
   2412 				case WM_MEDIATYPE_SERDES:
   2413 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2414 					break;
   2415 				default:
   2416 					break;
   2417 				}
   2418 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2419 				break;
   2420 			case CTRL_EXT_LINK_MODE_GMII:
   2421 			default:
   2422 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2423 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2424 				break;
   2425 			}
   2426 
    2427 			/* Enable the I2C interface only in SGMII mode. */
    2428 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2429 				reg |= CTRL_EXT_I2C_ENA;
    2430 			else
    2431 				reg &= ~CTRL_EXT_I2C_ENA;
   2432 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2433 
   2434 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2435 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2436 			else
   2437 				wm_tbi_mediainit(sc);
   2438 			break;
   2439 		default:
   2440 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2441 				aprint_error_dev(sc->sc_dev,
   2442 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2443 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2444 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2445 		}
   2446 	}
   2447 
   2448 	ifp = &sc->sc_ethercom.ec_if;
   2449 	xname = device_xname(sc->sc_dev);
   2450 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2451 	ifp->if_softc = sc;
   2452 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2453 	ifp->if_ioctl = wm_ioctl;
   2454 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2455 		ifp->if_start = wm_nq_start;
   2456 	else
   2457 		ifp->if_start = wm_start;
   2458 	ifp->if_watchdog = wm_watchdog;
   2459 	ifp->if_init = wm_init;
   2460 	ifp->if_stop = wm_stop;
   2461 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2462 	IFQ_SET_READY(&ifp->if_snd);
   2463 
   2464 	/* Check for jumbo frame */
   2465 	switch (sc->sc_type) {
   2466 	case WM_T_82573:
   2467 		/* XXX limited to 9234 if ASPM is disabled */
   2468 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2469 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2470 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2471 		break;
   2472 	case WM_T_82571:
   2473 	case WM_T_82572:
   2474 	case WM_T_82574:
   2475 	case WM_T_82575:
   2476 	case WM_T_82576:
   2477 	case WM_T_82580:
   2478 	case WM_T_I350:
   2479 	case WM_T_I354: /* XXXX ok? */
   2480 	case WM_T_I210:
   2481 	case WM_T_I211:
   2482 	case WM_T_80003:
   2483 	case WM_T_ICH9:
   2484 	case WM_T_ICH10:
   2485 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2486 	case WM_T_PCH_LPT:
   2487 		/* XXX limited to 9234 */
   2488 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2489 		break;
   2490 	case WM_T_PCH:
   2491 		/* XXX limited to 4096 */
   2492 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2493 		break;
   2494 	case WM_T_82542_2_0:
   2495 	case WM_T_82542_2_1:
   2496 	case WM_T_82583:
   2497 	case WM_T_ICH8:
   2498 		/* No support for jumbo frame */
   2499 		break;
   2500 	default:
   2501 		/* ETHER_MAX_LEN_JUMBO */
   2502 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2503 		break;
   2504 	}
   2505 
    2506 	/* If we're an i82543 or greater, we can support VLANs. */
   2507 	if (sc->sc_type >= WM_T_82543)
   2508 		sc->sc_ethercom.ec_capabilities |=
   2509 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2510 
   2511 	/*
    2512 	 * We can perform IPv4, TCPv4 and UDPv4 checksums in both
    2513 	 * directions, and TCPv6/UDPv6 checksums on transmit.  Only
    2514 	 * on i82543 and later.
   2514 	 */
   2515 	if (sc->sc_type >= WM_T_82543) {
   2516 		ifp->if_capabilities |=
   2517 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2518 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2519 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2520 		    IFCAP_CSUM_TCPv6_Tx |
   2521 		    IFCAP_CSUM_UDPv6_Tx;
   2522 	}
   2523 
   2524 	/*
   2525 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2526 	 *
   2527 	 *	82541GI (8086:1076) ... no
   2528 	 *	82572EI (8086:10b9) ... yes
   2529 	 */
   2530 	if (sc->sc_type >= WM_T_82571) {
   2531 		ifp->if_capabilities |=
   2532 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2533 	}
   2534 
   2535 	/*
    2536 	 * If we're an i82544 or greater (except the i82547), we can do
   2537 	 * TCP segmentation offload.
   2538 	 */
   2539 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2540 		ifp->if_capabilities |= IFCAP_TSOv4;
   2541 	}
   2542 
   2543 	if (sc->sc_type >= WM_T_82571) {
   2544 		ifp->if_capabilities |= IFCAP_TSOv6;
   2545 	}
   2546 
   2547 #ifdef WM_MPSAFE
   2548 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2549 #else
   2550 	sc->sc_core_lock = NULL;
   2551 #endif
   2552 
   2553 	/* Attach the interface. */
   2554 	if_attach(ifp);
   2555 	ether_ifattach(ifp, enaddr);
   2556 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2557 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2558 			  RND_FLAG_DEFAULT);
   2559 
   2560 #ifdef WM_EVENT_COUNTERS
   2561 	/* Attach event counters. */
   2562 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2563 	    NULL, xname, "txsstall");
   2564 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2565 	    NULL, xname, "txdstall");
   2566 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2567 	    NULL, xname, "txfifo_stall");
   2568 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2569 	    NULL, xname, "txdw");
   2570 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2571 	    NULL, xname, "txqe");
   2572 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2573 	    NULL, xname, "rxintr");
   2574 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2575 	    NULL, xname, "linkintr");
   2576 
   2577 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2578 	    NULL, xname, "rxipsum");
   2579 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2580 	    NULL, xname, "rxtusum");
   2581 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2582 	    NULL, xname, "txipsum");
   2583 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2584 	    NULL, xname, "txtusum");
   2585 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2586 	    NULL, xname, "txtusum6");
   2587 
   2588 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2589 	    NULL, xname, "txtso");
   2590 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2591 	    NULL, xname, "txtso6");
   2592 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2593 	    NULL, xname, "txtsopain");
   2594 
   2595 	for (i = 0; i < WM_NTXSEGS; i++) {
   2596 		snprintf(wm_txseg_evcnt_names[i],
   2597 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2598 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2599 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2600 	}
   2601 
   2602 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2603 	    NULL, xname, "txdrop");
   2604 
   2605 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2606 	    NULL, xname, "tu");
   2607 
   2608 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2609 	    NULL, xname, "tx_xoff");
   2610 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2611 	    NULL, xname, "tx_xon");
   2612 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2613 	    NULL, xname, "rx_xoff");
   2614 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2615 	    NULL, xname, "rx_xon");
   2616 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2617 	    NULL, xname, "rx_macctl");
   2618 #endif /* WM_EVENT_COUNTERS */
   2619 
   2620 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2621 		pmf_class_network_register(self, ifp);
   2622 	else
   2623 		aprint_error_dev(self, "couldn't establish power handler\n");
   2624 
   2625 	sc->sc_flags |= WM_F_ATTACHED;
   2626  out:
   2627 	return;
   2628 }
   2629 
   2630 /* The detach function (ca_detach) */
   2631 static int
   2632 wm_detach(device_t self, int flags __unused)
   2633 {
   2634 	struct wm_softc *sc = device_private(self);
   2635 	struct wm_rxqueue *rxq = sc->sc_rxq;
   2636 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2637 	int i;
   2638 #ifndef WM_MPSAFE
   2639 	int s;
   2640 #endif
   2641 
   2642 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2643 		return 0;
   2644 
   2645 #ifndef WM_MPSAFE
   2646 	s = splnet();
   2647 #endif
    2648 	/* Stop the interface.  Callouts are stopped inside wm_stop(). */
   2649 	wm_stop(ifp, 1);
   2650 
   2651 #ifndef WM_MPSAFE
   2652 	splx(s);
   2653 #endif
   2654 
   2655 	pmf_device_deregister(self);
   2656 
   2657 	/* Tell the firmware about the release */
   2658 	WM_CORE_LOCK(sc);
   2659 	wm_release_manageability(sc);
   2660 	wm_release_hw_control(sc);
   2661 	WM_CORE_UNLOCK(sc);
   2662 
   2663 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2664 
   2665 	/* Delete all remaining media. */
   2666 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2667 
   2668 	ether_ifdetach(ifp);
   2669 	if_detach(ifp);
   2670 
   2671 
   2672 	/* Unload RX dmamaps and free mbufs */
   2673 	WM_RX_LOCK(rxq);
   2674 	wm_rxdrain(sc);
   2675 	WM_RX_UNLOCK(rxq);
   2676 	/* Must unlock here */
   2677 
   2678 	wm_free_txrx_queues(sc);
   2679 
   2680 	/* Disestablish the interrupt handler */
   2681 	for (i = 0; i < sc->sc_nintrs; i++) {
   2682 		if (sc->sc_ihs[i] != NULL) {
   2683 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2684 			sc->sc_ihs[i] = NULL;
   2685 		}
   2686 	}
   2687 #ifdef WM_MSI_MSIX
   2688 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2689 #endif /* WM_MSI_MSIX */
   2690 
   2691 	/* Unmap the registers */
   2692 	if (sc->sc_ss) {
   2693 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2694 		sc->sc_ss = 0;
   2695 	}
   2696 	if (sc->sc_ios) {
   2697 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2698 		sc->sc_ios = 0;
   2699 	}
   2700 	if (sc->sc_flashs) {
   2701 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2702 		sc->sc_flashs = 0;
   2703 	}
   2704 
   2705 	if (sc->sc_core_lock)
   2706 		mutex_obj_free(sc->sc_core_lock);
   2707 
   2708 	return 0;
   2709 }
   2710 
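/*
 * wm_suspend/wm_resume:
 *
 *	Power management hooks registered with pmf in wm_attach():
 *	release manageability and hardware control (and, with WM_WOL,
 *	arm wake-up) on suspend; reinitialize manageability on resume.
 */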
   2711 static bool
   2712 wm_suspend(device_t self, const pmf_qual_t *qual)
   2713 {
   2714 	struct wm_softc *sc = device_private(self);
   2715 
   2716 	wm_release_manageability(sc);
   2717 	wm_release_hw_control(sc);
   2718 #ifdef WM_WOL
   2719 	wm_enable_wakeup(sc);
   2720 #endif
   2721 
   2722 	return true;
   2723 }
   2724 
   2725 static bool
   2726 wm_resume(device_t self, const pmf_qual_t *qual)
   2727 {
   2728 	struct wm_softc *sc = device_private(self);
   2729 
   2730 	wm_init_manageability(sc);
   2731 
   2732 	return true;
   2733 }
   2734 
   2735 /*
   2736  * wm_watchdog:		[ifnet interface function]
   2737  *
   2738  *	Watchdog timer handler.
   2739  */
   2740 static void
   2741 wm_watchdog(struct ifnet *ifp)
   2742 {
   2743 	struct wm_softc *sc = ifp->if_softc;
   2744 	struct wm_txqueue *txq = sc->sc_txq;
   2745 
   2746 	/*
   2747 	 * Since we're using delayed interrupts, sweep up
   2748 	 * before we report an error.
   2749 	 */
   2750 	WM_TX_LOCK(txq);
   2751 	wm_txeof(sc);
   2752 	WM_TX_UNLOCK(txq);
   2753 
   2754 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2755 #ifdef WM_DEBUG
   2756 		int i, j;
   2757 		struct wm_txsoft *txs;
   2758 #endif
   2759 		log(LOG_ERR,
   2760 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2761 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2762 		    txq->txq_next);
   2763 		ifp->if_oerrors++;
   2764 #ifdef WM_DEBUG
    2765 		for (i = txq->txq_txsdirty; i != txq->txq_txsnext;
    2766 		    i = WM_NEXTTXS(txq, i)) {
    2767 			txs = &txq->txq_txsoft[i];
    2768 			printf("txs %d tx %d -> %d\n",
    2769 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2770 			for (j = txs->txs_firstdesc; ;
    2771 			    j = WM_NEXTTX(txq, j)) {
    2772 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2773 				    txq->txq_nq_txdescs[j].nqtx_data.nqtxd_addr);
    2774 				printf("\t %#08x%08x\n",
    2775 				    txq->txq_nq_txdescs[j].nqtx_data.nqtxd_fields,
    2776 				    txq->txq_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
    2777 				if (j == txs->txs_lastdesc)
    2778 					break;
    2779 			}
    2780 		}
   2781 #endif
   2782 		/* Reset the interface. */
   2783 		(void) wm_init(ifp);
   2784 	}
   2785 
   2786 	/* Try to get more packets going. */
   2787 	ifp->if_start(ifp);
   2788 }
   2789 
   2790 /*
   2791  * wm_tick:
   2792  *
   2793  *	One second timer, used to check link status, sweep up
   2794  *	completed transmit jobs, etc.
   2795  */
   2796 static void
   2797 wm_tick(void *arg)
   2798 {
   2799 	struct wm_softc *sc = arg;
   2800 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2801 #ifndef WM_MPSAFE
   2802 	int s;
   2803 
   2804 	s = splnet();
   2805 #endif
   2806 
   2807 	WM_CORE_LOCK(sc);
   2808 
   2809 	if (sc->sc_stopping)
   2810 		goto out;
   2811 
   2812 	if (sc->sc_type >= WM_T_82542_2_1) {
   2813 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2814 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2815 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2816 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2817 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2818 	}
   2819 
   2820 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2821 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2822 	    + CSR_READ(sc, WMREG_CRCERRS)
   2823 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2824 	    + CSR_READ(sc, WMREG_SYMERRC)
   2825 	    + CSR_READ(sc, WMREG_RXERRC)
   2826 	    + CSR_READ(sc, WMREG_SEC)
   2827 	    + CSR_READ(sc, WMREG_CEXTERR)
   2828 	    + CSR_READ(sc, WMREG_RLEC);
   2829 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2830 
   2831 	if (sc->sc_flags & WM_F_HAS_MII)
   2832 		mii_tick(&sc->sc_mii);
   2833 	else if ((sc->sc_type >= WM_T_82575)
   2834 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2835 		wm_serdes_tick(sc);
   2836 	else
   2837 		wm_tbi_tick(sc);
   2838 
   2839 out:
   2840 	WM_CORE_UNLOCK(sc);
   2841 #ifndef WM_MPSAFE
   2842 	splx(s);
   2843 #endif
   2844 
   2845 	if (!sc->sc_stopping)
   2846 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2847 }
   2848 
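/*
 * wm_ifflags_cb:
 *
 *	Callback for interface flag changes.  Returns ENETRESET (so the
 *	interface is reinitialized) if any flag outside of
 *	IFF_CANTCHANGE and IFF_DEBUG changed; otherwise updates the
 *	receive filter (for IFF_PROMISC/IFF_ALLMULTI changes) and the
 *	VLAN settings.
 */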
   2849 static int
   2850 wm_ifflags_cb(struct ethercom *ec)
   2851 {
   2852 	struct ifnet *ifp = &ec->ec_if;
   2853 	struct wm_softc *sc = ifp->if_softc;
   2854 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2855 	int rc = 0;
   2856 
   2857 	WM_CORE_LOCK(sc);
   2858 
   2859 	if (change != 0)
   2860 		sc->sc_if_flags = ifp->if_flags;
   2861 
   2862 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2863 		rc = ENETRESET;
   2864 		goto out;
   2865 	}
   2866 
   2867 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2868 		wm_set_filter(sc);
   2869 
   2870 	wm_set_vlan(sc);
   2871 
   2872 out:
   2873 	WM_CORE_UNLOCK(sc);
   2874 
   2875 	return rc;
   2876 }
   2877 
   2878 /*
   2879  * wm_ioctl:		[ifnet interface function]
   2880  *
   2881  *	Handle control requests from the operator.
   2882  */
   2883 static int
   2884 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2885 {
   2886 	struct wm_softc *sc = ifp->if_softc;
   2887 	struct ifreq *ifr = (struct ifreq *) data;
   2888 	struct ifaddr *ifa = (struct ifaddr *)data;
   2889 	struct sockaddr_dl *sdl;
   2890 	int s, error;
   2891 
   2892 #ifndef WM_MPSAFE
   2893 	s = splnet();
   2894 #endif
   2895 	switch (cmd) {
   2896 	case SIOCSIFMEDIA:
   2897 	case SIOCGIFMEDIA:
   2898 		WM_CORE_LOCK(sc);
   2899 		/* Flow control requires full-duplex mode. */
   2900 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2901 		    (ifr->ifr_media & IFM_FDX) == 0)
   2902 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2903 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2904 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2905 				/* We can do both TXPAUSE and RXPAUSE. */
   2906 				ifr->ifr_media |=
   2907 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2908 			}
   2909 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2910 		}
   2911 		WM_CORE_UNLOCK(sc);
   2912 #ifdef WM_MPSAFE
   2913 		s = splnet();
   2914 #endif
   2915 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2916 #ifdef WM_MPSAFE
   2917 		splx(s);
   2918 #endif
   2919 		break;
   2920 	case SIOCINITIFADDR:
   2921 		WM_CORE_LOCK(sc);
   2922 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2923 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2924 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2925 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2926 			/* unicast address is first multicast entry */
   2927 			wm_set_filter(sc);
   2928 			error = 0;
   2929 			WM_CORE_UNLOCK(sc);
   2930 			break;
   2931 		}
   2932 		WM_CORE_UNLOCK(sc);
   2933 		/*FALLTHROUGH*/
   2934 	default:
   2935 #ifdef WM_MPSAFE
   2936 		s = splnet();
   2937 #endif
   2938 		/* It may call wm_start, so unlock here */
   2939 		error = ether_ioctl(ifp, cmd, data);
   2940 #ifdef WM_MPSAFE
   2941 		splx(s);
   2942 #endif
   2943 		if (error != ENETRESET)
   2944 			break;
   2945 
   2946 		error = 0;
   2947 
   2948 		if (cmd == SIOCSIFCAP) {
   2949 			error = (*ifp->if_init)(ifp);
   2950 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2951 			;
   2952 		else if (ifp->if_flags & IFF_RUNNING) {
   2953 			/*
   2954 			 * Multicast list has changed; set the hardware filter
   2955 			 * accordingly.
   2956 			 */
   2957 			WM_CORE_LOCK(sc);
   2958 			wm_set_filter(sc);
   2959 			WM_CORE_UNLOCK(sc);
   2960 		}
   2961 		break;
   2962 	}
   2963 
   2964 #ifndef WM_MPSAFE
   2965 	splx(s);
   2966 #endif
   2967 	return error;
   2968 }
   2969 
   2970 /* MAC address related */
   2971 
   2972 /*
    2973  * Get the offset of the MAC address and return it.
    2974  * On error, fall back to offset 0.
   2975  */
   2976 static uint16_t
   2977 wm_check_alt_mac_addr(struct wm_softc *sc)
   2978 {
   2979 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2980 	uint16_t offset = NVM_OFF_MACADDR;
   2981 
   2982 	/* Try to read alternative MAC address pointer */
   2983 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2984 		return 0;
   2985 
   2986 	/* Check pointer if it's valid or not. */
   2987 	if ((offset == 0x0000) || (offset == 0xffff))
   2988 		return 0;
   2989 
   2990 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2991 	/*
    2992 	 * Check whether the alternative MAC address is valid.
    2993 	 * Some cards have a non-0xffff pointer but don't actually use
    2994 	 * an alternative MAC address.
    2995 	 *
    2996 	 * A valid address must not have the multicast (group) bit set.
   2997 	 */
   2998 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
    2999 		if ((myea[0] & 0x01) == 0)
   3000 			return offset; /* Found */
   3001 
   3002 	/* Not found */
   3003 	return 0;
   3004 }
   3005 
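/*
 * Illustrative sketch (not part of the driver): the two validity tests
 * wm_check_alt_mac_addr() applies above.  A pointer word of 0x0000 or
 * 0xffff means an erased or absent NVM entry, and a candidate address
 * whose first octet has the multicast (group) bit set cannot be a
 * station address.  The function names here are hypothetical.
 */
#if 0
static bool
alt_mac_ptr_valid(uint16_t ptr)
{

	return (ptr != 0x0000) && (ptr != 0xffff);
}

static bool
mac_word0_is_unicast(uint16_t word0)
{

	/* The low byte of word 0 is the first octet of the address. */
	return (word0 & 0x01) == 0;
}
#endif
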
   3006 static int
   3007 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3008 {
   3009 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3010 	uint16_t offset = NVM_OFF_MACADDR;
   3011 	int do_invert = 0;
   3012 
   3013 	switch (sc->sc_type) {
   3014 	case WM_T_82580:
   3015 	case WM_T_I350:
   3016 	case WM_T_I354:
   3017 		/* EEPROM Top Level Partitioning */
   3018 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3019 		break;
   3020 	case WM_T_82571:
   3021 	case WM_T_82575:
   3022 	case WM_T_82576:
   3023 	case WM_T_80003:
   3024 	case WM_T_I210:
   3025 	case WM_T_I211:
   3026 		offset = wm_check_alt_mac_addr(sc);
   3027 		if (offset == 0)
   3028 			if ((sc->sc_funcid & 0x01) == 1)
   3029 				do_invert = 1;
   3030 		break;
   3031 	default:
   3032 		if ((sc->sc_funcid & 0x01) == 1)
   3033 			do_invert = 1;
   3034 		break;
   3035 	}
   3036 
   3037 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   3038 		myea) != 0)
   3039 		goto bad;
   3040 
   3041 	enaddr[0] = myea[0] & 0xff;
   3042 	enaddr[1] = myea[0] >> 8;
   3043 	enaddr[2] = myea[1] & 0xff;
   3044 	enaddr[3] = myea[1] >> 8;
   3045 	enaddr[4] = myea[2] & 0xff;
   3046 	enaddr[5] = myea[2] >> 8;
   3047 
   3048 	/*
   3049 	 * Toggle the LSB of the MAC address on the second port
   3050 	 * of some dual port cards.
   3051 	 */
   3052 	if (do_invert != 0)
   3053 		enaddr[5] ^= 1;
   3054 
   3055 	return 0;
   3056 
   3057  bad:
   3058 	return -1;
   3059 }
   3060 
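/*
 * Illustrative sketch (not part of the driver): how the three
 * little-endian NVM words map onto the six MAC address octets in
 * wm_read_mac_addr() above, and how toggling the LSB of the last
 * octet derives the second port's address on some dual-port cards.
 */
#if 0
static void
nvm_words_to_mac(const uint16_t myea[3], int second_port, uint8_t mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		mac[2 * i] = myea[i] & 0xff;		/* low byte first */
		mac[2 * i + 1] = myea[i] >> 8;
	}
	if (second_port)
		mac[5] ^= 1;	/* e.g. xx:...:00 becomes xx:...:01 */
}
#endif
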
   3061 /*
   3062  * wm_set_ral:
   3063  *
    3064  *	Set an entry in the receive address list.
   3065  */
   3066 static void
   3067 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3068 {
   3069 	uint32_t ral_lo, ral_hi;
   3070 
   3071 	if (enaddr != NULL) {
   3072 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3073 		    (enaddr[3] << 24);
   3074 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3075 		ral_hi |= RAL_AV;
   3076 	} else {
   3077 		ral_lo = 0;
   3078 		ral_hi = 0;
   3079 	}
   3080 
   3081 	if (sc->sc_type >= WM_T_82544) {
   3082 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3083 		    ral_lo);
   3084 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3085 		    ral_hi);
   3086 	} else {
   3087 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3088 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3089 	}
   3090 }
   3091 
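/*
 * Illustrative sketch (not part of the driver): packing a MAC address
 * into the RAL_LO/RAL_HI register pair as wm_set_ral() does above.
 * For 00:11:22:33:44:55 this yields ral_lo = 0x33221100 and
 * ral_hi = 0x5544 | RAL_AV.
 */
#if 0
static void
pack_ral(const uint8_t ea[6], uint32_t *lo, uint32_t *hi)
{

	*lo = ea[0] | (ea[1] << 8) | (ea[2] << 16) |
	    ((uint32_t)ea[3] << 24);
	*hi = (ea[4] | (ea[5] << 8)) | RAL_AV; /* RAL_AV marks it valid */
}
#endif
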
   3092 /*
   3093  * wm_mchash:
   3094  *
   3095  *	Compute the hash of the multicast address for the 4096-bit
   3096  *	multicast filter.
   3097  */
   3098 static uint32_t
   3099 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3100 {
   3101 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3102 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3103 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3104 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3105 	uint32_t hash;
   3106 
   3107 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3108 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3109 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   3110 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3111 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3112 		return (hash & 0x3ff);
   3113 	}
   3114 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3115 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3116 
   3117 	return (hash & 0xfff);
   3118 }
   3119 
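/*
 * Worked example (not part of the driver): for the IPv4 all-hosts
 * group 01:00:5e:00:00:01 on a non-ICH/PCH chip with the default
 * sc_mchash_type of 0 (lo_shift 4, hi_shift 4), enaddr[4] = 0x00 and
 * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
 * wm_set_filter() below then sets bit (0x010 & 0x1f) = 16 of MTA
 * register (0x010 >> 5) = 0.
 */
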
   3120 /*
   3121  * wm_set_filter:
   3122  *
   3123  *	Set up the receive filter.
   3124  */
   3125 static void
   3126 wm_set_filter(struct wm_softc *sc)
   3127 {
   3128 	struct ethercom *ec = &sc->sc_ethercom;
   3129 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3130 	struct ether_multi *enm;
   3131 	struct ether_multistep step;
   3132 	bus_addr_t mta_reg;
   3133 	uint32_t hash, reg, bit;
   3134 	int i, size;
   3135 
   3136 	if (sc->sc_type >= WM_T_82544)
   3137 		mta_reg = WMREG_CORDOVA_MTA;
   3138 	else
   3139 		mta_reg = WMREG_MTA;
   3140 
   3141 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3142 
   3143 	if (ifp->if_flags & IFF_BROADCAST)
   3144 		sc->sc_rctl |= RCTL_BAM;
   3145 	if (ifp->if_flags & IFF_PROMISC) {
   3146 		sc->sc_rctl |= RCTL_UPE;
   3147 		goto allmulti;
   3148 	}
   3149 
   3150 	/*
   3151 	 * Set the station address in the first RAL slot, and
   3152 	 * clear the remaining slots.
   3153 	 */
   3154 	if (sc->sc_type == WM_T_ICH8)
    3155 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3156 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3157 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3158 	    || (sc->sc_type == WM_T_PCH_LPT))
   3159 		size = WM_RAL_TABSIZE_ICH8;
   3160 	else if (sc->sc_type == WM_T_82575)
   3161 		size = WM_RAL_TABSIZE_82575;
   3162 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3163 		size = WM_RAL_TABSIZE_82576;
   3164 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3165 		size = WM_RAL_TABSIZE_I350;
   3166 	else
   3167 		size = WM_RAL_TABSIZE;
   3168 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3169 	for (i = 1; i < size; i++)
   3170 		wm_set_ral(sc, NULL, i);
   3171 
   3172 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3173 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3174 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   3175 		size = WM_ICH8_MC_TABSIZE;
   3176 	else
   3177 		size = WM_MC_TABSIZE;
   3178 	/* Clear out the multicast table. */
   3179 	for (i = 0; i < size; i++)
   3180 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3181 
   3182 	ETHER_FIRST_MULTI(step, ec, enm);
   3183 	while (enm != NULL) {
   3184 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3185 			/*
   3186 			 * We must listen to a range of multicast addresses.
   3187 			 * For now, just accept all multicasts, rather than
   3188 			 * trying to set only those filter bits needed to match
   3189 			 * the range.  (At this time, the only use of address
   3190 			 * ranges is for IP multicast routing, for which the
   3191 			 * range is big enough to require all bits set.)
   3192 			 */
   3193 			goto allmulti;
   3194 		}
   3195 
   3196 		hash = wm_mchash(sc, enm->enm_addrlo);
   3197 
   3198 		reg = (hash >> 5);
   3199 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3200 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3201 		    || (sc->sc_type == WM_T_PCH2)
   3202 		    || (sc->sc_type == WM_T_PCH_LPT))
   3203 			reg &= 0x1f;
   3204 		else
   3205 			reg &= 0x7f;
   3206 		bit = hash & 0x1f;
   3207 
   3208 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3209 		hash |= 1U << bit;
   3210 
    3211 		/* 82544 errata: rewrite the preceding even MTA register */
    3212 		if (sc->sc_type == WM_T_82544 && (reg & 0x1) == 1) {
   3213 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3214 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3215 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3216 		} else
   3217 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3218 
   3219 		ETHER_NEXT_MULTI(step, enm);
   3220 	}
   3221 
   3222 	ifp->if_flags &= ~IFF_ALLMULTI;
   3223 	goto setit;
   3224 
   3225  allmulti:
   3226 	ifp->if_flags |= IFF_ALLMULTI;
   3227 	sc->sc_rctl |= RCTL_MPE;
   3228 
   3229  setit:
   3230 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3231 }
   3232 
   3233 /* Reset and init related */
   3234 
   3235 static void
   3236 wm_set_vlan(struct wm_softc *sc)
   3237 {
   3238 	/* Deal with VLAN enables. */
   3239 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3240 		sc->sc_ctrl |= CTRL_VME;
   3241 	else
   3242 		sc->sc_ctrl &= ~CTRL_VME;
   3243 
   3244 	/* Write the control registers. */
   3245 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3246 }
   3247 
   3248 static void
   3249 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3250 {
   3251 	uint32_t gcr;
   3252 	pcireg_t ctrl2;
   3253 
   3254 	gcr = CSR_READ(sc, WMREG_GCR);
   3255 
   3256 	/* Only take action if timeout value is defaulted to 0 */
   3257 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3258 		goto out;
   3259 
   3260 	if ((gcr & GCR_CAP_VER2) == 0) {
   3261 		gcr |= GCR_CMPL_TMOUT_10MS;
   3262 		goto out;
   3263 	}
   3264 
   3265 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3266 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3267 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3268 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3269 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3270 
   3271 out:
   3272 	/* Disable completion timeout resend */
   3273 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3274 
   3275 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3276 }
   3277 
   3278 void
   3279 wm_get_auto_rd_done(struct wm_softc *sc)
   3280 {
   3281 	int i;
   3282 
   3283 	/* wait for eeprom to reload */
   3284 	switch (sc->sc_type) {
   3285 	case WM_T_82571:
   3286 	case WM_T_82572:
   3287 	case WM_T_82573:
   3288 	case WM_T_82574:
   3289 	case WM_T_82583:
   3290 	case WM_T_82575:
   3291 	case WM_T_82576:
   3292 	case WM_T_82580:
   3293 	case WM_T_I350:
   3294 	case WM_T_I354:
   3295 	case WM_T_I210:
   3296 	case WM_T_I211:
   3297 	case WM_T_80003:
   3298 	case WM_T_ICH8:
   3299 	case WM_T_ICH9:
   3300 		for (i = 0; i < 10; i++) {
   3301 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3302 				break;
   3303 			delay(1000);
   3304 		}
   3305 		if (i == 10) {
   3306 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3307 			    "complete\n", device_xname(sc->sc_dev));
   3308 		}
   3309 		break;
   3310 	default:
   3311 		break;
   3312 	}
   3313 }
   3314 
   3315 void
   3316 wm_lan_init_done(struct wm_softc *sc)
   3317 {
   3318 	uint32_t reg = 0;
   3319 	int i;
   3320 
   3321 	/* wait for eeprom to reload */
   3322 	switch (sc->sc_type) {
   3323 	case WM_T_ICH10:
   3324 	case WM_T_PCH:
   3325 	case WM_T_PCH2:
   3326 	case WM_T_PCH_LPT:
   3327 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3328 			reg = CSR_READ(sc, WMREG_STATUS);
   3329 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3330 				break;
   3331 			delay(100);
   3332 		}
   3333 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3334 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3335 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3336 		}
   3337 		break;
   3338 	default:
   3339 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3340 		    __func__);
   3341 		break;
   3342 	}
   3343 
   3344 	reg &= ~STATUS_LAN_INIT_DONE;
   3345 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3346 }
   3347 
   3348 void
   3349 wm_get_cfg_done(struct wm_softc *sc)
   3350 {
   3351 	int mask;
   3352 	uint32_t reg;
   3353 	int i;
   3354 
   3355 	/* wait for eeprom to reload */
   3356 	switch (sc->sc_type) {
   3357 	case WM_T_82542_2_0:
   3358 	case WM_T_82542_2_1:
   3359 		/* null */
   3360 		break;
   3361 	case WM_T_82543:
   3362 	case WM_T_82544:
   3363 	case WM_T_82540:
   3364 	case WM_T_82545:
   3365 	case WM_T_82545_3:
   3366 	case WM_T_82546:
   3367 	case WM_T_82546_3:
   3368 	case WM_T_82541:
   3369 	case WM_T_82541_2:
   3370 	case WM_T_82547:
   3371 	case WM_T_82547_2:
   3372 	case WM_T_82573:
   3373 	case WM_T_82574:
   3374 	case WM_T_82583:
   3375 		/* generic */
   3376 		delay(10*1000);
   3377 		break;
   3378 	case WM_T_80003:
   3379 	case WM_T_82571:
   3380 	case WM_T_82572:
   3381 	case WM_T_82575:
   3382 	case WM_T_82576:
   3383 	case WM_T_82580:
   3384 	case WM_T_I350:
   3385 	case WM_T_I354:
   3386 	case WM_T_I210:
   3387 	case WM_T_I211:
   3388 		if (sc->sc_type == WM_T_82571) {
    3389 			/* On the 82571, all functions share CFGDONE_0 */
   3390 			mask = EEMNGCTL_CFGDONE_0;
   3391 		} else
   3392 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3393 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3394 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3395 				break;
   3396 			delay(1000);
   3397 		}
   3398 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3399 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3400 				device_xname(sc->sc_dev), __func__));
   3401 		}
   3402 		break;
   3403 	case WM_T_ICH8:
   3404 	case WM_T_ICH9:
   3405 	case WM_T_ICH10:
   3406 	case WM_T_PCH:
   3407 	case WM_T_PCH2:
   3408 	case WM_T_PCH_LPT:
   3409 		delay(10*1000);
   3410 		if (sc->sc_type >= WM_T_ICH10)
   3411 			wm_lan_init_done(sc);
   3412 		else
   3413 			wm_get_auto_rd_done(sc);
   3414 
   3415 		reg = CSR_READ(sc, WMREG_STATUS);
   3416 		if ((reg & STATUS_PHYRA) != 0)
   3417 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3418 		break;
   3419 	default:
   3420 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3421 		    __func__);
   3422 		break;
   3423 	}
   3424 }
   3425 
   3426 /* Init hardware bits */
   3427 void
   3428 wm_initialize_hardware_bits(struct wm_softc *sc)
   3429 {
   3430 	uint32_t tarc0, tarc1, reg;
   3431 
   3432 	/* For 82571 variant, 80003 and ICHs */
   3433 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3434 	    || (sc->sc_type >= WM_T_80003)) {
   3435 
   3436 		/* Transmit Descriptor Control 0 */
   3437 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3438 		reg |= TXDCTL_COUNT_DESC;
   3439 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3440 
   3441 		/* Transmit Descriptor Control 1 */
   3442 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3443 		reg |= TXDCTL_COUNT_DESC;
   3444 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3445 
   3446 		/* TARC0 */
   3447 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3448 		switch (sc->sc_type) {
   3449 		case WM_T_82571:
   3450 		case WM_T_82572:
   3451 		case WM_T_82573:
   3452 		case WM_T_82574:
   3453 		case WM_T_82583:
   3454 		case WM_T_80003:
   3455 			/* Clear bits 30..27 */
   3456 			tarc0 &= ~__BITS(30, 27);
   3457 			break;
   3458 		default:
   3459 			break;
   3460 		}
   3461 
   3462 		switch (sc->sc_type) {
   3463 		case WM_T_82571:
   3464 		case WM_T_82572:
   3465 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3466 
   3467 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3468 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3469 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3470 			/* 8257[12] Errata No.7 */
    3471 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3472 
   3473 			/* TARC1 bit 28 */
   3474 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3475 				tarc1 &= ~__BIT(28);
   3476 			else
   3477 				tarc1 |= __BIT(28);
   3478 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3479 
   3480 			/*
   3481 			 * 8257[12] Errata No.13
    3482 			 * Disable Dynamic Clock Gating.
   3483 			 */
   3484 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3485 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3486 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3487 			break;
   3488 		case WM_T_82573:
   3489 		case WM_T_82574:
   3490 		case WM_T_82583:
   3491 			if ((sc->sc_type == WM_T_82574)
   3492 			    || (sc->sc_type == WM_T_82583))
   3493 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3494 
   3495 			/* Extended Device Control */
   3496 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3497 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3498 			reg |= __BIT(22);	/* Set bit 22 */
   3499 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3500 
   3501 			/* Device Control */
   3502 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3503 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3504 
   3505 			/* PCIe Control Register */
   3506 			/*
   3507 			 * 82573 Errata (unknown).
   3508 			 *
   3509 			 * 82574 Errata 25 and 82583 Errata 12
   3510 			 * "Dropped Rx Packets":
    3511 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3512 			 */
   3513 			reg = CSR_READ(sc, WMREG_GCR);
   3514 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3515 			CSR_WRITE(sc, WMREG_GCR, reg);
   3516 
   3517 			if ((sc->sc_type == WM_T_82574)
   3518 			    || (sc->sc_type == WM_T_82583)) {
   3519 				/*
   3520 				 * Document says this bit must be set for
   3521 				 * proper operation.
   3522 				 */
   3523 				reg = CSR_READ(sc, WMREG_GCR);
   3524 				reg |= __BIT(22);
   3525 				CSR_WRITE(sc, WMREG_GCR, reg);
   3526 
   3527 				/*
    3528 				 * Apply a workaround for a documented
    3529 				 * hardware erratum: unreliable PCIe
    3530 				 * completions can occur, particularly
    3531 				 * with ASPM enabled.  Without this fix,
    3532 				 * the issue can cause Tx timeouts.
   3534 				 */
   3535 				reg = CSR_READ(sc, WMREG_GCR2);
   3536 				reg |= __BIT(0);
   3537 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3538 			}
   3539 			break;
   3540 		case WM_T_80003:
   3541 			/* TARC0 */
   3542 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3543 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3544 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3545 
   3546 			/* TARC1 bit 28 */
   3547 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3548 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3549 				tarc1 &= ~__BIT(28);
   3550 			else
   3551 				tarc1 |= __BIT(28);
   3552 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3553 			break;
   3554 		case WM_T_ICH8:
   3555 		case WM_T_ICH9:
   3556 		case WM_T_ICH10:
   3557 		case WM_T_PCH:
   3558 		case WM_T_PCH2:
   3559 		case WM_T_PCH_LPT:
   3560 			/* TARC 0 */
   3561 			if (sc->sc_type == WM_T_ICH8) {
   3562 				/* Set TARC0 bits 29 and 28 */
   3563 				tarc0 |= __BITS(29, 28);
   3564 			}
   3565 			/* Set TARC0 bits 23,24,26,27 */
   3566 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3567 
   3568 			/* CTRL_EXT */
   3569 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3570 			reg |= __BIT(22);	/* Set bit 22 */
   3571 			/*
   3572 			 * Enable PHY low-power state when MAC is at D3
   3573 			 * w/o WoL
   3574 			 */
   3575 			if (sc->sc_type >= WM_T_PCH)
   3576 				reg |= CTRL_EXT_PHYPDEN;
   3577 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3578 
   3579 			/* TARC1 */
   3580 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3581 			/* bit 28 */
   3582 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3583 				tarc1 &= ~__BIT(28);
   3584 			else
   3585 				tarc1 |= __BIT(28);
   3586 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3587 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3588 
   3589 			/* Device Status */
   3590 			if (sc->sc_type == WM_T_ICH8) {
   3591 				reg = CSR_READ(sc, WMREG_STATUS);
   3592 				reg &= ~__BIT(31);
   3593 				CSR_WRITE(sc, WMREG_STATUS, reg);
    3595 			}
   3596 
   3597 			/*
   3598 			 * Work-around descriptor data corruption issue during
   3599 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3600 			 * capability.
   3601 			 */
   3602 			reg = CSR_READ(sc, WMREG_RFCTL);
   3603 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3604 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3605 			break;
   3606 		default:
   3607 			break;
   3608 		}
   3609 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3610 
   3611 		/*
   3612 		 * 8257[12] Errata No.52 and some others.
   3613 		 * Avoid RSS Hash Value bug.
   3614 		 */
   3615 		switch (sc->sc_type) {
   3616 		case WM_T_82571:
   3617 		case WM_T_82572:
   3618 		case WM_T_82573:
   3619 		case WM_T_80003:
   3620 		case WM_T_ICH8:
   3621 			reg = CSR_READ(sc, WMREG_RFCTL);
    3622 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3623 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3624 			break;
   3625 		default:
   3626 			break;
   3627 		}
   3628 	}
   3629 }
   3630 
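/*
 * Illustrative sketch (not part of the driver): what the __BIT()/
 * __BITS() macros used above evaluate to, assuming hi >= lo and a
 * 32-bit mask.  NetBSD's real macros also accept the bounds in either
 * order and work for wider types; the names here are hypothetical.
 */
#if 0
#define EX_BIT(n)	(1U << (n))
#define EX_BITS(hi, lo)	((~0U >> (31 - (hi))) & ~(EX_BIT(lo) - 1U))

/* EX_BITS(30, 27) == 0x78000000: bits 27 through 30 inclusive. */
#endif
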
   3631 static uint32_t
   3632 wm_rxpbs_adjust_82580(uint32_t val)
   3633 {
   3634 	uint32_t rv = 0;
   3635 
   3636 	if (val < __arraycount(wm_82580_rxpbs_table))
   3637 		rv = wm_82580_rxpbs_table[val];
   3638 
   3639 	return rv;
   3640 }
   3641 
   3642 /*
   3643  * wm_reset:
   3644  *
   3645  *	Reset the i82542 chip.
   3646  */
   3647 static void
   3648 wm_reset(struct wm_softc *sc)
   3649 {
   3650 	struct wm_txqueue *txq = sc->sc_txq;
   3651 	int phy_reset = 0;
   3652 	int error = 0;
   3653 	uint32_t reg, mask;
   3654 
   3655 	/*
   3656 	 * Allocate on-chip memory according to the MTU size.
   3657 	 * The Packet Buffer Allocation register must be written
   3658 	 * before the chip is reset.
   3659 	 */
   3660 	switch (sc->sc_type) {
   3661 	case WM_T_82547:
   3662 	case WM_T_82547_2:
   3663 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3664 		    PBA_22K : PBA_30K;
   3665 		txq->txq_fifo_head = 0;
   3666 		txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3667 		txq->txq_fifo_size =
   3668 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3669 		txq->txq_fifo_stall = 0;
   3670 		break;
   3671 	case WM_T_82571:
   3672 	case WM_T_82572:
    3673 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3674 	case WM_T_80003:
   3675 		sc->sc_pba = PBA_32K;
   3676 		break;
   3677 	case WM_T_82573:
   3678 		sc->sc_pba = PBA_12K;
   3679 		break;
   3680 	case WM_T_82574:
   3681 	case WM_T_82583:
   3682 		sc->sc_pba = PBA_20K;
   3683 		break;
   3684 	case WM_T_82576:
   3685 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3686 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3687 		break;
   3688 	case WM_T_82580:
   3689 	case WM_T_I350:
   3690 	case WM_T_I354:
   3691 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3692 		break;
   3693 	case WM_T_I210:
   3694 	case WM_T_I211:
   3695 		sc->sc_pba = PBA_34K;
   3696 		break;
   3697 	case WM_T_ICH8:
   3698 		/* Workaround for a bit corruption issue in FIFO memory */
   3699 		sc->sc_pba = PBA_8K;
   3700 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3701 		break;
   3702 	case WM_T_ICH9:
   3703 	case WM_T_ICH10:
   3704 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3705 		    PBA_14K : PBA_10K;
   3706 		break;
   3707 	case WM_T_PCH:
   3708 	case WM_T_PCH2:
   3709 	case WM_T_PCH_LPT:
   3710 		sc->sc_pba = PBA_26K;
   3711 		break;
   3712 	default:
   3713 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3714 		    PBA_40K : PBA_48K;
   3715 		break;
   3716 	}
   3717 	/*
   3718 	 * Only old or non-multiqueue devices have the PBA register
   3719 	 * XXX Need special handling for 82575.
   3720 	 */
   3721 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3722 	    || (sc->sc_type == WM_T_82575))
   3723 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3724 
   3725 	/* Prevent the PCI-E bus from sticking */
   3726 	if (sc->sc_flags & WM_F_PCIE) {
   3727 		int timeout = 800;
   3728 
   3729 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3730 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3731 
   3732 		while (timeout--) {
   3733 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3734 			    == 0)
   3735 				break;
   3736 			delay(100);
   3737 		}
   3738 	}
   3739 
   3740 	/* Set the completion timeout for interface */
   3741 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3742 	    || (sc->sc_type == WM_T_82580)
   3743 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3744 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3745 		wm_set_pcie_completion_timeout(sc);
   3746 
   3747 	/* Clear interrupt */
   3748 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3749 	if (sc->sc_nintrs > 1) {
   3750 		if (sc->sc_type != WM_T_82574) {
   3751 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3752 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3753 		} else {
   3754 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3755 		}
   3756 	}
   3757 
   3758 	/* Stop the transmit and receive processes. */
   3759 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3760 	sc->sc_rctl &= ~RCTL_EN;
   3761 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3762 	CSR_WRITE_FLUSH(sc);
   3763 
   3764 	/* XXX set_tbi_sbp_82543() */
   3765 
   3766 	delay(10*1000);
   3767 
   3768 	/* Must acquire the MDIO ownership before MAC reset */
   3769 	switch (sc->sc_type) {
   3770 	case WM_T_82573:
   3771 	case WM_T_82574:
   3772 	case WM_T_82583:
   3773 		error = wm_get_hw_semaphore_82573(sc);
   3774 		break;
   3775 	default:
   3776 		break;
   3777 	}
   3778 
   3779 	/*
   3780 	 * 82541 Errata 29? & 82547 Errata 28?
   3781 	 * See also the description about PHY_RST bit in CTRL register
   3782 	 * in 8254x_GBe_SDM.pdf.
   3783 	 */
   3784 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3785 		CSR_WRITE(sc, WMREG_CTRL,
   3786 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3787 		CSR_WRITE_FLUSH(sc);
   3788 		delay(5000);
   3789 	}
   3790 
   3791 	switch (sc->sc_type) {
   3792 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3793 	case WM_T_82541:
   3794 	case WM_T_82541_2:
   3795 	case WM_T_82547:
   3796 	case WM_T_82547_2:
   3797 		/*
   3798 		 * On some chipsets, a reset through a memory-mapped write
   3799 		 * cycle can cause the chip to reset before completing the
   3800 		 * write cycle.  This causes major headache that can be
   3801 		 * avoided by issuing the reset via indirect register writes
   3802 		 * through I/O space.
   3803 		 *
   3804 		 * So, if we successfully mapped the I/O BAR at attach time,
   3805 		 * use that.  Otherwise, try our luck with a memory-mapped
   3806 		 * reset.
   3807 		 */
   3808 		if (sc->sc_flags & WM_F_IOH_VALID)
   3809 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3810 		else
   3811 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3812 		break;
   3813 	case WM_T_82545_3:
   3814 	case WM_T_82546_3:
   3815 		/* Use the shadow control register on these chips. */
   3816 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3817 		break;
   3818 	case WM_T_80003:
   3819 		mask = swfwphysem[sc->sc_funcid];
   3820 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3821 		wm_get_swfw_semaphore(sc, mask);
   3822 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3823 		wm_put_swfw_semaphore(sc, mask);
   3824 		break;
   3825 	case WM_T_ICH8:
   3826 	case WM_T_ICH9:
   3827 	case WM_T_ICH10:
   3828 	case WM_T_PCH:
   3829 	case WM_T_PCH2:
   3830 	case WM_T_PCH_LPT:
   3831 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3832 		if (wm_check_reset_block(sc) == 0) {
   3833 			/*
   3834 			 * Gate automatic PHY configuration by hardware on
   3835 			 * non-managed 82579
   3836 			 */
   3837 			if ((sc->sc_type == WM_T_PCH2)
   3838 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3839 				!= 0))
   3840 				wm_gate_hw_phy_config_ich8lan(sc, 1);
    3841 
   3843 			reg |= CTRL_PHY_RESET;
   3844 			phy_reset = 1;
   3845 		}
   3846 		wm_get_swfwhw_semaphore(sc);
   3847 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3848 		/* Don't insert a completion barrier during reset */
   3849 		delay(20*1000);
   3850 		wm_put_swfwhw_semaphore(sc);
   3851 		break;
   3852 	case WM_T_82580:
   3853 	case WM_T_I350:
   3854 	case WM_T_I354:
   3855 	case WM_T_I210:
   3856 	case WM_T_I211:
   3857 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3858 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3859 			CSR_WRITE_FLUSH(sc);
   3860 		delay(5000);
   3861 		break;
   3862 	case WM_T_82542_2_0:
   3863 	case WM_T_82542_2_1:
   3864 	case WM_T_82543:
   3865 	case WM_T_82540:
   3866 	case WM_T_82545:
   3867 	case WM_T_82546:
   3868 	case WM_T_82571:
   3869 	case WM_T_82572:
   3870 	case WM_T_82573:
   3871 	case WM_T_82574:
   3872 	case WM_T_82575:
   3873 	case WM_T_82576:
   3874 	case WM_T_82583:
   3875 	default:
   3876 		/* Everything else can safely use the documented method. */
   3877 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3878 		break;
   3879 	}
   3880 
   3881 	/* Must release the MDIO ownership after MAC reset */
   3882 	switch (sc->sc_type) {
   3883 	case WM_T_82573:
   3884 	case WM_T_82574:
   3885 	case WM_T_82583:
   3886 		if (error == 0)
   3887 			wm_put_hw_semaphore_82573(sc);
   3888 		break;
   3889 	default:
   3890 		break;
   3891 	}
   3892 
   3893 	if (phy_reset != 0)
   3894 		wm_get_cfg_done(sc);
   3895 
   3896 	/* reload EEPROM */
   3897 	switch (sc->sc_type) {
   3898 	case WM_T_82542_2_0:
   3899 	case WM_T_82542_2_1:
   3900 	case WM_T_82543:
   3901 	case WM_T_82544:
   3902 		delay(10);
   3903 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3904 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3905 		CSR_WRITE_FLUSH(sc);
   3906 		delay(2000);
   3907 		break;
   3908 	case WM_T_82540:
   3909 	case WM_T_82545:
   3910 	case WM_T_82545_3:
   3911 	case WM_T_82546:
   3912 	case WM_T_82546_3:
   3913 		delay(5*1000);
   3914 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3915 		break;
   3916 	case WM_T_82541:
   3917 	case WM_T_82541_2:
   3918 	case WM_T_82547:
   3919 	case WM_T_82547_2:
   3920 		delay(20000);
   3921 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3922 		break;
   3923 	case WM_T_82571:
   3924 	case WM_T_82572:
   3925 	case WM_T_82573:
   3926 	case WM_T_82574:
   3927 	case WM_T_82583:
   3928 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3929 			delay(10);
   3930 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3931 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3932 			CSR_WRITE_FLUSH(sc);
   3933 		}
   3934 		/* check EECD_EE_AUTORD */
   3935 		wm_get_auto_rd_done(sc);
   3936 		/*
    3937 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
   3938 		 * is set.
   3939 		 */
   3940 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3941 		    || (sc->sc_type == WM_T_82583))
   3942 			delay(25*1000);
   3943 		break;
   3944 	case WM_T_82575:
   3945 	case WM_T_82576:
   3946 	case WM_T_82580:
   3947 	case WM_T_I350:
   3948 	case WM_T_I354:
   3949 	case WM_T_I210:
   3950 	case WM_T_I211:
   3951 	case WM_T_80003:
   3952 		/* check EECD_EE_AUTORD */
   3953 		wm_get_auto_rd_done(sc);
   3954 		break;
   3955 	case WM_T_ICH8:
   3956 	case WM_T_ICH9:
   3957 	case WM_T_ICH10:
   3958 	case WM_T_PCH:
   3959 	case WM_T_PCH2:
   3960 	case WM_T_PCH_LPT:
   3961 		break;
   3962 	default:
   3963 		panic("%s: unknown type\n", __func__);
   3964 	}
   3965 
   3966 	/* Check whether EEPROM is present or not */
   3967 	switch (sc->sc_type) {
   3968 	case WM_T_82575:
   3969 	case WM_T_82576:
   3970 	case WM_T_82580:
   3971 	case WM_T_I350:
   3972 	case WM_T_I354:
   3973 	case WM_T_ICH8:
   3974 	case WM_T_ICH9:
   3975 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3976 			/* Not found */
   3977 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3978 			if (sc->sc_type == WM_T_82575)
   3979 				wm_reset_init_script_82575(sc);
   3980 		}
   3981 		break;
   3982 	default:
   3983 		break;
   3984 	}
   3985 
   3986 	if ((sc->sc_type == WM_T_82580)
   3987 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3988 		/* clear global device reset status bit */
   3989 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3990 	}
   3991 
   3992 	/* Clear any pending interrupt events. */
   3993 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3994 	reg = CSR_READ(sc, WMREG_ICR);
   3995 	if (sc->sc_nintrs > 1) {
   3996 		if (sc->sc_type != WM_T_82574) {
   3997 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3998 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3999 		} else
   4000 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4001 	}
   4002 
   4003 	/* reload sc_ctrl */
   4004 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4005 
   4006 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4007 		wm_set_eee_i350(sc);
   4008 
   4009 	/* dummy read from WUC */
   4010 	if (sc->sc_type == WM_T_PCH)
   4011 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4012 	/*
   4013 	 * For PCH, this write will make sure that any noise will be detected
   4014 	 * as a CRC error and be dropped rather than show up as a bad packet
   4015 	 * to the DMA engine
   4016 	 */
   4017 	if (sc->sc_type == WM_T_PCH)
   4018 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4019 
   4020 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4021 		CSR_WRITE(sc, WMREG_WUC, 0);
   4022 
   4023 	wm_reset_mdicnfg_82580(sc);
   4024 
   4025 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4026 		wm_pll_workaround_i210(sc);
   4027 }
   4028 
   4029 /*
   4030  * wm_add_rxbuf:
   4031  *
    4032  *	Add a receive buffer to the indicated descriptor.
   4033  */
   4034 static int
   4035 wm_add_rxbuf(struct wm_softc *sc, int idx)
   4036 {
   4037 	struct wm_rxqueue *rxq = sc->sc_rxq;
   4038 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4039 	struct mbuf *m;
   4040 	int error;
   4041 
   4042 	KASSERT(WM_RX_LOCKED(rxq));
   4043 
   4044 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4045 	if (m == NULL)
   4046 		return ENOBUFS;
   4047 
   4048 	MCLGET(m, M_DONTWAIT);
   4049 	if ((m->m_flags & M_EXT) == 0) {
   4050 		m_freem(m);
   4051 		return ENOBUFS;
   4052 	}
   4053 
   4054 	if (rxs->rxs_mbuf != NULL)
   4055 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4056 
   4057 	rxs->rxs_mbuf = m;
   4058 
   4059 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4060 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4061 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   4062 	if (error) {
   4063 		/* XXX XXX XXX */
   4064 		aprint_error_dev(sc->sc_dev,
   4065 		    "unable to load rx DMA map %d, error = %d\n",
   4066 		    idx, error);
   4067 		panic("wm_add_rxbuf");
   4068 	}
   4069 
   4070 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4071 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4072 
   4073 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4074 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4075 			wm_init_rxdesc(sc, idx);
   4076 	} else
   4077 		wm_init_rxdesc(sc, idx);
   4078 
   4079 	return 0;
   4080 }
   4081 
   4082 /*
   4083  * wm_rxdrain:
   4084  *
   4085  *	Drain the receive queue.
   4086  */
   4087 static void
   4088 wm_rxdrain(struct wm_softc *sc)
   4089 {
   4090 	struct wm_rxqueue *rxq = sc->sc_rxq;
   4091 	struct wm_rxsoft *rxs;
   4092 	int i;
   4093 
   4094 	KASSERT(WM_RX_LOCKED(rxq));
   4095 
   4096 	for (i = 0; i < WM_NRXDESC; i++) {
   4097 		rxs = &rxq->rxq_soft[i];
   4098 		if (rxs->rxs_mbuf != NULL) {
   4099 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4100 			m_freem(rxs->rxs_mbuf);
   4101 			rxs->rxs_mbuf = NULL;
   4102 		}
   4103 	}
   4104 }
   4105 
   4106 /*
   4107  * wm_init:		[ifnet interface function]
   4108  *
   4109  *	Initialize the interface.
   4110  */
   4111 static int
   4112 wm_init(struct ifnet *ifp)
   4113 {
   4114 	struct wm_softc *sc = ifp->if_softc;
   4115 	int ret;
   4116 
   4117 	WM_CORE_LOCK(sc);
   4118 	ret = wm_init_locked(ifp);
   4119 	WM_CORE_UNLOCK(sc);
   4120 
   4121 	return ret;
   4122 }
   4123 
   4124 static int
   4125 wm_init_locked(struct ifnet *ifp)
   4126 {
   4127 	struct wm_softc *sc = ifp->if_softc;
   4128 	int i, j, trynum, error = 0;
   4129 	uint32_t reg;
   4130 
   4131 	KASSERT(WM_CORE_LOCKED(sc));
   4132 	/*
    4133 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4134 	 * There is a small but measurable benefit to avoiding the adjustment
   4135 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4136 	 * on such platforms.  One possibility is that the DMA itself is
   4137 	 * slightly more efficient if the front of the entire packet (instead
   4138 	 * of the front of the headers) is aligned.
   4139 	 *
   4140 	 * Note we must always set align_tweak to 0 if we are using
   4141 	 * jumbo frames.
   4142 	 */
   4143 #ifdef __NO_STRICT_ALIGNMENT
   4144 	sc->sc_align_tweak = 0;
   4145 #else
   4146 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4147 		sc->sc_align_tweak = 0;
   4148 	else
   4149 		sc->sc_align_tweak = 2;
   4150 #endif /* __NO_STRICT_ALIGNMENT */
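
	/*
	 * Example: with sc_align_tweak = 2, the 14-byte Ethernet header
	 * starts at offset 2 in the receive buffer, so the IP header
	 * that follows begins at offset 16 and is 4-byte aligned, as
	 * strict-alignment platforms require.
	 */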
   4151 
   4152 	/* Cancel any pending I/O. */
   4153 	wm_stop_locked(ifp, 0);
   4154 
   4155 	/* update statistics before reset */
   4156 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4157 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4158 
   4159 	/* Reset the chip to a known state. */
   4160 	wm_reset(sc);
   4161 
   4162 	switch (sc->sc_type) {
   4163 	case WM_T_82571:
   4164 	case WM_T_82572:
   4165 	case WM_T_82573:
   4166 	case WM_T_82574:
   4167 	case WM_T_82583:
   4168 	case WM_T_80003:
   4169 	case WM_T_ICH8:
   4170 	case WM_T_ICH9:
   4171 	case WM_T_ICH10:
   4172 	case WM_T_PCH:
   4173 	case WM_T_PCH2:
   4174 	case WM_T_PCH_LPT:
   4175 		if (wm_check_mng_mode(sc) != 0)
   4176 			wm_get_hw_control(sc);
   4177 		break;
   4178 	default:
   4179 		break;
   4180 	}
   4181 
   4182 	/* Init hardware bits */
   4183 	wm_initialize_hardware_bits(sc);
   4184 
   4185 	/* Reset the PHY. */
   4186 	if (sc->sc_flags & WM_F_HAS_MII)
   4187 		wm_gmii_reset(sc);
   4188 
   4189 	/* Calculate (E)ITR value */
   4190 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4191 		sc->sc_itr = 450;	/* For EITR */
   4192 	} else if (sc->sc_type >= WM_T_82543) {
   4193 		/*
   4194 		 * Set up the interrupt throttling register (units of 256ns)
   4195 		 * Note that a footnote in Intel's documentation says this
   4196 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4197 		 * or 10Mbit mode.  Empirically, it appears to be the case
    4198 		 * or 10Mbit mode.  Empirically, the same appears to be
    4199 		 * true for the 1024ns units of the other
   4200 		 * to divide this value by 4 when the link speed is low.
   4201 		 *
   4202 		 * XXX implement this division at link speed change!
   4203 		 */
   4204 
   4205 		/*
   4206 		 * For N interrupts/sec, set this value to:
   4207 		 * 1000000000 / (N * 256).  Note that we set the
   4208 		 * absolute and packet timer values to this value
   4209 		 * divided by 4 to get "simple timer" behavior.
   4210 		 */
   4211 
   4212 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4213 	}
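
	/*
	 * Example: sc_itr = 1500 gives an interval of 1500 * 256ns =
	 * 384us between interrupts, i.e. 1000000000 / (1500 * 256) ~=
	 * 2604 interrupts/sec, matching the comment above.
	 */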
   4214 
   4215 	error = wm_init_txrx_queues(sc);
   4216 	if (error)
   4217 		goto out;
   4218 
   4219 	/*
   4220 	 * Clear out the VLAN table -- we don't use it (yet).
   4221 	 */
   4222 	CSR_WRITE(sc, WMREG_VET, 0);
   4223 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4224 		trynum = 10; /* Due to hw errata */
   4225 	else
   4226 		trynum = 1;
   4227 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4228 		for (j = 0; j < trynum; j++)
   4229 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4230 
   4231 	/*
   4232 	 * Set up flow-control parameters.
   4233 	 *
   4234 	 * XXX Values could probably stand some tuning.
   4235 	 */
   4236 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4237 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4238 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   4239 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4240 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4241 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4242 	}
   4243 
   4244 	sc->sc_fcrtl = FCRTL_DFLT;
   4245 	if (sc->sc_type < WM_T_82543) {
   4246 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4247 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4248 	} else {
   4249 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4250 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4251 	}
   4252 
   4253 	if (sc->sc_type == WM_T_80003)
   4254 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4255 	else
   4256 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4257 
    4258 	/* Write the control register. */
   4259 	wm_set_vlan(sc);
   4260 
   4261 	if (sc->sc_flags & WM_F_HAS_MII) {
   4262 		int val;
   4263 
   4264 		switch (sc->sc_type) {
   4265 		case WM_T_80003:
   4266 		case WM_T_ICH8:
   4267 		case WM_T_ICH9:
   4268 		case WM_T_ICH10:
   4269 		case WM_T_PCH:
   4270 		case WM_T_PCH2:
   4271 		case WM_T_PCH_LPT:
   4272 			/*
   4273 			 * Set the mac to wait the maximum time between each
   4274 			 * iteration and increase the max iterations when
   4275 			 * polling the phy; this fixes erroneous timeouts at
   4276 			 * 10Mbps.
   4277 			 */
   4278 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4279 			    0xFFFF);
   4280 			val = wm_kmrn_readreg(sc,
   4281 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   4282 			val |= 0x3F;
   4283 			wm_kmrn_writereg(sc,
   4284 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4285 			break;
   4286 		default:
   4287 			break;
   4288 		}
   4289 
   4290 		if (sc->sc_type == WM_T_80003) {
   4291 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4292 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4293 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4294 
   4295 			/* Bypass RX and TX FIFO's */
   4296 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4297 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4298 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4299 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4300 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4301 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4302 		}
   4303 	}
   4304 #if 0
   4305 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4306 #endif
   4307 
   4308 	/* Set up checksum offload parameters. */
   4309 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4310 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4311 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4312 		reg |= RXCSUM_IPOFL;
   4313 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4314 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4315 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4316 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4317 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4318 
   4319 	/* Set up MSI-X */
   4320 	if (sc->sc_nintrs > 1) {
   4321 		uint32_t ivar;
   4322 
   4323 		if (sc->sc_type == WM_T_82575) {
   4324 			/* Interrupt control */
   4325 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4326 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4327 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4328 
   4329 			/* TX */
   4330 			CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_TXINTR_IDX),
   4331 			    EITR_TX_QUEUE0);
   4332 			/* RX */
   4333 			CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_RXINTR_IDX),
   4334 			    EITR_RX_QUEUE0);
   4335 			/* Link status */
   4336 			CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_LINKINTR_IDX),
   4337 			    EITR_OTHER);
   4338 		} else if (sc->sc_type == WM_T_82574) {
   4339 			/* Interrupt control */
   4340 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4341 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4342 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4343 
   4344 			/* TX, RX and Link status */
   4345 			ivar = __SHIFTIN((IVAR_VALID_82574|WM_MSIX_TXINTR_IDX),
   4346 			    IVAR_TX_MASK_Q_82574(0));
   4347 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4348 				| WM_MSIX_RXINTR_IDX),
   4349 			    IVAR_RX_MASK_Q_82574(0));
    4350 			ivar |= __SHIFTIN((IVAR_VALID_82574 | WM_MSIX_LINKINTR_IDX),
   4351 			    IVAR_OTHER_MASK);
   4352 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4353 		} else {
   4354 			/* Interrupt control */
   4355 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
   4356 			    | GPIE_MULTI_MSIX | GPIE_EIAME
   4357 			    | GPIE_PBA);
   4358 
   4359 			switch (sc->sc_type) {
   4360 			case WM_T_82580:
   4361 			case WM_T_I350:
   4362 			case WM_T_I354:
   4363 			case WM_T_I210:
   4364 			case WM_T_I211:
   4365 				/* TX */
   4366 				ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
   4367 				ivar &= ~IVAR_TX_MASK_Q(0);
   4368 				ivar |= __SHIFTIN(
   4369 					(WM_MSIX_TXINTR_IDX | IVAR_VALID),
   4370 					IVAR_TX_MASK_Q(0));
   4371 				CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
   4372 
   4373 				/* RX */
   4374 				ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
   4375 				ivar &= ~IVAR_RX_MASK_Q(0);
   4376 				ivar |= __SHIFTIN(
   4377 					(WM_MSIX_RXINTR_IDX | IVAR_VALID),
   4378 					IVAR_RX_MASK_Q(0));
   4379 				CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
   4380 				break;
   4381 			case WM_T_82576:
   4382 				/* TX */
   4383 				ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
   4384 				ivar &= ~IVAR_TX_MASK_Q_82576(0);
   4385 				ivar |= __SHIFTIN(
   4386 					(WM_MSIX_TXINTR_IDX | IVAR_VALID),
   4387 					IVAR_TX_MASK_Q_82576(0));
   4388 				CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
   4389 
   4390 				/* RX */
   4391 				ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
   4392 				ivar &= ~IVAR_RX_MASK_Q_82576(0);
   4393 				ivar |= __SHIFTIN(
   4394 					(WM_MSIX_RXINTR_IDX | IVAR_VALID),
   4395 					IVAR_RX_MASK_Q_82576(0));
   4396 				CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
   4397 				break;
   4398 			default:
   4399 				break;
   4400 			}
   4401 
   4402 			/* Link status */
   4403 			ivar = __SHIFTIN((WM_MSIX_LINKINTR_IDX | IVAR_VALID),
   4404 			    IVAR_MISC_OTHER);
   4405 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4406 		}
   4407 	}
   4408 
   4409 	/* Set up the interrupt registers. */
   4410 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4411 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4412 	    ICR_RXO | ICR_RXT0;
   4413 	if (sc->sc_nintrs > 1) {
   4414 		uint32_t mask;
   4415 		switch (sc->sc_type) {
   4416 		case WM_T_82574:
   4417 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4418 			    WMREG_EIAC_82574_MSIX_MASK);
   4419 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4420 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4421 			break;
   4422 		default:
   4423 			if (sc->sc_type == WM_T_82575)
    4424 				mask = EITR_RX_QUEUE0 | EITR_TX_QUEUE0
   4425 				    | EITR_OTHER;
   4426 			else
   4427 				mask = (1 << WM_MSIX_RXINTR_IDX)
   4428 				    | (1 << WM_MSIX_TXINTR_IDX)
   4429 				    | (1 << WM_MSIX_LINKINTR_IDX);
   4430 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4431 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4432 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4433 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4434 			break;
   4435 		}
   4436 	} else
   4437 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4438 
   4439 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4440 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4441 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   4442 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4443 		reg |= KABGTXD_BGSQLBIAS;
   4444 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4445 	}
   4446 
   4447 	/* Set up the inter-packet gap. */
   4448 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4449 
   4450 	if (sc->sc_type >= WM_T_82543) {
   4451 		/*
    4452 		 * XXX The 82574 has both ITR and EITR.  Set EITR when we
    4453 		 * use the multiqueue function with MSI-X.
   4454 		 */
   4455 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4456 			if (sc->sc_nintrs > 1) {
   4457 				CSR_WRITE(sc, WMREG_EITR(WM_MSIX_RXINTR_IDX),
   4458 				    sc->sc_itr);
   4459 				CSR_WRITE(sc, WMREG_EITR(WM_MSIX_TXINTR_IDX),
   4460 				    sc->sc_itr);
   4461 				/*
    4462 				 * Link interrupts occur much less frequently
    4463 				 * than TX or RX interrupts, so we don't tune
    4464 				 * the EITR(WM_MSIX_LINKINTR_IDX) value the
    4465 				 * way FreeBSD's if_igb does.
   4466 				 */
   4467 			} else
   4468 				CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
   4469 		} else
   4470 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4471 	}
   4472 
   4473 	/* Set the VLAN ethernetype. */
   4474 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4475 
   4476 	/*
   4477 	 * Set up the transmit control register; we start out with
    4478 	 * a collision distance suitable for FDX, but update it when
   4479 	 * we resolve the media type.
   4480 	 */
   4481 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4482 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4483 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4484 	if (sc->sc_type >= WM_T_82571)
   4485 		sc->sc_tctl |= TCTL_MULR;
   4486 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4487 
   4488 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    4489 		/* Write TDT only after TCTL.EN is set; see the datasheet. */
   4490 		CSR_WRITE(sc, WMREG_TDT, 0);
   4491 	}
   4492 
   4493 	if (sc->sc_type == WM_T_80003) {
   4494 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4495 		reg &= ~TCTL_EXT_GCEX_MASK;
   4496 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4497 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4498 	}
   4499 
   4500 	/* Set the media. */
   4501 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4502 		goto out;
   4503 
   4504 	/* Configure for OS presence */
   4505 	wm_init_manageability(sc);
   4506 
   4507 	/*
   4508 	 * Set up the receive control register; we actually program
   4509 	 * the register when we set the receive filter.  Use multicast
   4510 	 * address offset type 0.
   4511 	 *
   4512 	 * Only the i82544 has the ability to strip the incoming
   4513 	 * CRC, so we don't enable that feature.
   4514 	 */
   4515 	sc->sc_mchash_type = 0;
   4516 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4517 	    | RCTL_MO(sc->sc_mchash_type);
   4518 
   4519 	/*
    4520 	 * The I350 has a bug where it always strips the CRC whether
    4521 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   4522 	 */
   4523 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4524 	    || (sc->sc_type == WM_T_I210))
   4525 		sc->sc_rctl |= RCTL_SECRC;
   4526 
   4527 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4528 	    && (ifp->if_mtu > ETHERMTU)) {
   4529 		sc->sc_rctl |= RCTL_LPE;
   4530 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4531 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4532 	}
   4533 
   4534 	if (MCLBYTES == 2048) {
   4535 		sc->sc_rctl |= RCTL_2k;
   4536 	} else {
   4537 		if (sc->sc_type >= WM_T_82543) {
   4538 			switch (MCLBYTES) {
   4539 			case 4096:
   4540 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4541 				break;
   4542 			case 8192:
   4543 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4544 				break;
   4545 			case 16384:
   4546 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4547 				break;
   4548 			default:
   4549 				panic("wm_init: MCLBYTES %d unsupported",
   4550 				    MCLBYTES);
   4551 				break;
   4552 			}
   4553 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   4554 	}
   4555 
   4556 	/* Set the receive filter. */
   4557 	wm_set_filter(sc);
   4558 
   4559 	/* Enable ECC */
   4560 	switch (sc->sc_type) {
   4561 	case WM_T_82571:
   4562 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4563 		reg |= PBA_ECC_CORR_EN;
   4564 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4565 		break;
   4566 	case WM_T_PCH_LPT:
   4567 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4568 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4569 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4570 
   4571 		reg = CSR_READ(sc, WMREG_CTRL);
   4572 		reg |= CTRL_MEHE;
   4573 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4574 		break;
   4575 	default:
   4576 		break;
   4577 	}
   4578 
   4579 	/* On 575 and later set RDT only if RX enabled */
   4580 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4581 		for (i = 0; i < WM_NRXDESC; i++)
   4582 			wm_init_rxdesc(sc, i);
   4583 
   4584 	sc->sc_stopping = false;
   4585 
   4586 	/* Start the one second link check clock. */
   4587 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4588 
   4589 	/* ...all done! */
   4590 	ifp->if_flags |= IFF_RUNNING;
   4591 	ifp->if_flags &= ~IFF_OACTIVE;
   4592 
   4593  out:
   4594 	sc->sc_if_flags = ifp->if_flags;
   4595 	if (error)
   4596 		log(LOG_ERR, "%s: interface not running\n",
   4597 		    device_xname(sc->sc_dev));
   4598 	return error;
   4599 }
   4600 
   4601 /*
   4602  * wm_stop:		[ifnet interface function]
   4603  *
   4604  *	Stop transmission on the interface.
   4605  */
   4606 static void
   4607 wm_stop(struct ifnet *ifp, int disable)
   4608 {
   4609 	struct wm_softc *sc = ifp->if_softc;
   4610 
   4611 	WM_CORE_LOCK(sc);
   4612 	wm_stop_locked(ifp, disable);
   4613 	WM_CORE_UNLOCK(sc);
   4614 }
   4615 
   4616 static void
   4617 wm_stop_locked(struct ifnet *ifp, int disable)
   4618 {
   4619 	struct wm_softc *sc = ifp->if_softc;
   4620 	struct wm_txqueue *txq = sc->sc_txq;
   4621 	struct wm_rxqueue *rxq = sc->sc_rxq;
   4622 	struct wm_txsoft *txs;
   4623 	int i;
   4624 
   4625 	KASSERT(WM_CORE_LOCKED(sc));
   4626 
   4627 	sc->sc_stopping = true;
   4628 
   4629 	/* Stop the one second clock. */
   4630 	callout_stop(&sc->sc_tick_ch);
   4631 
   4632 	/* Stop the 82547 Tx FIFO stall check timer. */
   4633 	if (sc->sc_type == WM_T_82547)
   4634 		callout_stop(&sc->sc_txfifo_ch);
   4635 
   4636 	if (sc->sc_flags & WM_F_HAS_MII) {
   4637 		/* Down the MII. */
   4638 		mii_down(&sc->sc_mii);
   4639 	} else {
   4640 #if 0
   4641 		/* Should we clear PHY's status properly? */
   4642 		wm_reset(sc);
   4643 #endif
   4644 	}
   4645 
   4646 	/* Stop the transmit and receive processes. */
   4647 	CSR_WRITE(sc, WMREG_TCTL, 0);
   4648 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4649 	sc->sc_rctl &= ~RCTL_EN;
   4650 
   4651 	/*
   4652 	 * Clear the interrupt mask to ensure the device cannot assert its
   4653 	 * interrupt line.
   4654 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   4655 	 * service any currently pending or shared interrupt.
   4656 	 */
   4657 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4658 	sc->sc_icr = 0;
   4659 	if (sc->sc_nintrs > 1) {
   4660 		if (sc->sc_type != WM_T_82574) {
   4661 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4662 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4663 		} else
   4664 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4665 	}
   4666 
   4667 	/* Release any queued transmit buffers. */
   4668 	WM_TX_LOCK(txq);
   4669 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   4670 		txs = &txq->txq_soft[i];
   4671 		if (txs->txs_mbuf != NULL) {
   4672 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   4673 			m_freem(txs->txs_mbuf);
   4674 			txs->txs_mbuf = NULL;
   4675 		}
   4676 	}
   4677 	WM_TX_UNLOCK(txq);
   4678 
   4679 	/* Mark the interface as down and cancel the watchdog timer. */
   4680 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   4681 	ifp->if_timer = 0;
   4682 
   4683 	if (disable) {
   4684 		WM_RX_LOCK(rxq);
   4685 		wm_rxdrain(sc);
   4686 		WM_RX_UNLOCK(rxq);
   4687 	}
   4688 
   4689 #if 0 /* notyet */
   4690 	if (sc->sc_type >= WM_T_82544)
   4691 		CSR_WRITE(sc, WMREG_WUC, 0);
   4692 #endif
   4693 }
   4694 
   4695 /*
   4696  * wm_tx_offload:
   4697  *
   4698  *	Set up TCP/IP checksumming parameters for the
   4699  *	specified packet.
   4700  */
   4701 static int
   4702 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   4703     uint8_t *fieldsp)
   4704 {
   4705 	struct wm_txqueue *txq = sc->sc_txq;
   4706 	struct mbuf *m0 = txs->txs_mbuf;
   4707 	struct livengood_tcpip_ctxdesc *t;
   4708 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   4709 	uint32_t ipcse;
   4710 	struct ether_header *eh;
   4711 	int offset, iphl;
   4712 	uint8_t fields;
   4713 
   4714 	/*
   4715 	 * XXX It would be nice if the mbuf pkthdr had offset
   4716 	 * fields for the protocol headers.
   4717 	 */
   4718 
   4719 	eh = mtod(m0, struct ether_header *);
   4720 	switch (htons(eh->ether_type)) {
   4721 	case ETHERTYPE_IP:
   4722 	case ETHERTYPE_IPV6:
   4723 		offset = ETHER_HDR_LEN;
   4724 		break;
   4725 
   4726 	case ETHERTYPE_VLAN:
   4727 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4728 		break;
   4729 
   4730 	default:
   4731 		/*
   4732 		 * Don't support this protocol or encapsulation.
   4733 		 */
   4734 		*fieldsp = 0;
   4735 		*cmdp = 0;
   4736 		return 0;
   4737 	}
   4738 
   4739 	if ((m0->m_pkthdr.csum_flags &
   4740 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   4741 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4742 	} else {
   4743 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4744 	}
   4745 	ipcse = offset + iphl - 1;
   4746 
   4747 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   4748 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   4749 	seg = 0;
   4750 	fields = 0;
   4751 
   4752 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4753 		int hlen = offset + iphl;
   4754 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4755 
   4756 		if (__predict_false(m0->m_len <
   4757 				    (hlen + sizeof(struct tcphdr)))) {
   4758 			/*
   4759 			 * TCP/IP headers are not in the first mbuf; we need
   4760 			 * to do this the slow and painful way.  Let's just
   4761 			 * hope this doesn't happen very often.
   4762 			 */
   4763 			struct tcphdr th;
   4764 
   4765 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4766 
   4767 			m_copydata(m0, hlen, sizeof(th), &th);
   4768 			if (v4) {
   4769 				struct ip ip;
   4770 
   4771 				m_copydata(m0, offset, sizeof(ip), &ip);
   4772 				ip.ip_len = 0;
   4773 				m_copyback(m0,
   4774 				    offset + offsetof(struct ip, ip_len),
   4775 				    sizeof(ip.ip_len), &ip.ip_len);
   4776 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4777 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4778 			} else {
   4779 				struct ip6_hdr ip6;
   4780 
   4781 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4782 				ip6.ip6_plen = 0;
   4783 				m_copyback(m0,
   4784 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4785 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4786 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4787 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4788 			}
   4789 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4790 			    sizeof(th.th_sum), &th.th_sum);
   4791 
   4792 			hlen += th.th_off << 2;
   4793 		} else {
   4794 			/*
   4795 			 * TCP/IP headers are in the first mbuf; we can do
   4796 			 * this the easy way.
   4797 			 */
   4798 			struct tcphdr *th;
   4799 
   4800 			if (v4) {
   4801 				struct ip *ip =
   4802 				    (void *)(mtod(m0, char *) + offset);
   4803 				th = (void *)(mtod(m0, char *) + hlen);
   4804 
   4805 				ip->ip_len = 0;
   4806 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4807 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4808 			} else {
   4809 				struct ip6_hdr *ip6 =
   4810 				    (void *)(mtod(m0, char *) + offset);
   4811 				th = (void *)(mtod(m0, char *) + hlen);
   4812 
   4813 				ip6->ip6_plen = 0;
   4814 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4815 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4816 			}
   4817 			hlen += th->th_off << 2;
   4818 		}
   4819 
   4820 		if (v4) {
   4821 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4822 			cmdlen |= WTX_TCPIP_CMD_IP;
   4823 		} else {
   4824 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4825 			ipcse = 0;
   4826 		}
   4827 		cmd |= WTX_TCPIP_CMD_TSE;
   4828 		cmdlen |= WTX_TCPIP_CMD_TSE |
   4829 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   4830 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   4831 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   4832 	}
   4833 
   4834 	/*
   4835 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   4836 	 * offload feature, if we load the context descriptor, we
   4837 	 * MUST provide valid values for IPCSS and TUCSS fields.
   4838 	 */
   4839 
   4840 	ipcs = WTX_TCPIP_IPCSS(offset) |
   4841 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   4842 	    WTX_TCPIP_IPCSE(ipcse);
   4843 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   4844 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   4845 		fields |= WTX_IXSM;
   4846 	}
   4847 
   4848 	offset += iphl;
   4849 
   4850 	if (m0->m_pkthdr.csum_flags &
   4851 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   4852 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4853 		fields |= WTX_TXSM;
   4854 		tucs = WTX_TCPIP_TUCSS(offset) |
   4855 		    WTX_TCPIP_TUCSO(offset +
   4856 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   4857 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4858 	} else if ((m0->m_pkthdr.csum_flags &
   4859 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   4860 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4861 		fields |= WTX_TXSM;
   4862 		tucs = WTX_TCPIP_TUCSS(offset) |
   4863 		    WTX_TCPIP_TUCSO(offset +
   4864 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   4865 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4866 	} else {
   4867 		/* Just initialize it to a valid TCP context. */
   4868 		tucs = WTX_TCPIP_TUCSS(offset) |
   4869 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   4870 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4871 	}
   4872 
   4873 	/* Fill in the context descriptor. */
   4874 	t = (struct livengood_tcpip_ctxdesc *)
   4875 	    &txq->txq_descs[txq->txq_next];
   4876 	t->tcpip_ipcs = htole32(ipcs);
   4877 	t->tcpip_tucs = htole32(tucs);
   4878 	t->tcpip_cmdlen = htole32(cmdlen);
   4879 	t->tcpip_seg = htole32(seg);
   4880 	wm_cdtxsync(sc, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   4881 
   4882 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4883 	txs->txs_ndesc++;
   4884 
   4885 	*cmdp = cmd;
   4886 	*fieldsp = fields;
   4887 
   4888 	return 0;
   4889 }
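/*
 * For example (hypothetical packet, for illustration): for an untagged
 * IPv4/TCP frame, offset = ETHER_HDR_LEN = 14 and iphl = 20, so the
 * context fields set up by wm_tx_offload() become:
 *
 *	IPCSS = 14			(start of the IP header)
 *	IPCSO = 14 + 10 = 24		(offsetof(struct ip, ip_sum))
 *	IPCSE = 14 + 20 - 1 = 33	(last byte of the IP header)
 *	TUCSS = 14 + 20 = 34		(start of the TCP header)
 *	TUCSO = 34 + 16 = 50		(offsetof(struct tcphdr, th_sum))
 *	TUCSE = 0			(checksum runs to end of packet)
 */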
   4890 
   4891 static void
   4892 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   4893 {
   4894 	struct mbuf *m;
   4895 	int i;
   4896 
   4897 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   4898 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   4899 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   4900 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   4901 		    m->m_data, m->m_len, m->m_flags);
   4902 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   4903 	    i, i == 1 ? "" : "s");
   4904 }
   4905 
   4906 /*
   4907  * wm_82547_txfifo_stall:
   4908  *
   4909  *	Callout used to wait for the 82547 Tx FIFO to drain,
   4910  *	reset the FIFO pointers, and restart packet transmission.
   4911  */
   4912 static void
   4913 wm_82547_txfifo_stall(void *arg)
   4914 {
   4915 	struct wm_softc *sc = arg;
   4916 	struct wm_txqueue *txq = sc->sc_txq;
   4917 #ifndef WM_MPSAFE
   4918 	int s;
   4919 
   4920 	s = splnet();
   4921 #endif
   4922 	WM_TX_LOCK(txq);
   4923 
   4924 	if (sc->sc_stopping)
   4925 		goto out;
   4926 
   4927 	if (txq->txq_fifo_stall) {
   4928 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
   4929 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   4930 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   4931 			/*
   4932 			 * Packets have drained.  Stop transmitter, reset
   4933 			 * FIFO pointers, restart transmitter, and kick
   4934 			 * the packet queue.
   4935 			 */
   4936 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   4937 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   4938 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   4939 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   4940 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   4941 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   4942 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   4943 			CSR_WRITE_FLUSH(sc);
   4944 
   4945 			txq->txq_fifo_head = 0;
   4946 			txq->txq_fifo_stall = 0;
   4947 			wm_start_locked(&sc->sc_ethercom.ec_if);
   4948 		} else {
   4949 			/*
   4950 			 * Still waiting for packets to drain; try again in
   4951 			 * another tick.
   4952 			 */
   4953 			callout_schedule(&sc->sc_txfifo_ch, 1);
   4954 		}
   4955 	}
   4956 
   4957 out:
   4958 	WM_TX_UNLOCK(txq);
   4959 #ifndef WM_MPSAFE
   4960 	splx(s);
   4961 #endif
   4962 }
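/*
 * Note (assumed register semantics): TDH/TDT are the descriptor ring
 * head/tail, while TDFH/TDFT (and their TDFHS/TDFTS saved copies) are
 * the internal packet FIFO head/tail, so the three equality tests
 * above mean both the ring and the FIFO have fully drained before the
 * FIFO pointers are rewound.
 */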
   4963 
   4964 /*
   4965  * wm_82547_txfifo_bugchk:
   4966  *
   4967  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   4968  *	prevent enqueueing a packet that would wrap around the end
    4969  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   4970  *
   4971  *	We do this by checking the amount of space before the end
   4972  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   4973  *	the Tx FIFO, wait for all remaining packets to drain, reset
   4974  *	the internal FIFO pointers to the beginning, and restart
   4975  *	transmission on the interface.
   4976  */
   4977 #define	WM_FIFO_HDR		0x10
   4978 #define	WM_82547_PAD_LEN	0x3e0
   4979 static int
   4980 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   4981 {
   4982 	struct wm_txqueue *txq = sc->sc_txq;
   4983 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   4984 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   4985 
   4986 	/* Just return if already stalled. */
   4987 	if (txq->txq_fifo_stall)
   4988 		return 1;
   4989 
   4990 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   4991 		/* Stall only occurs in half-duplex mode. */
   4992 		goto send_packet;
   4993 	}
   4994 
   4995 	if (len >= WM_82547_PAD_LEN + space) {
   4996 		txq->txq_fifo_stall = 1;
   4997 		callout_schedule(&sc->sc_txfifo_ch, 1);
   4998 		return 1;
   4999 	}
   5000 
   5001  send_packet:
   5002 	txq->txq_fifo_head += len;
   5003 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5004 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5005 
   5006 	return 0;
   5007 }
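/*
 * For example (hypothetical numbers): assume txq_fifo_size = 0x2000
 * and txq_fifo_head = 0x1e00, so space = 0x200.  A 1514-byte packet
 * rounds up to len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600.
 * Since 0x600 >= WM_82547_PAD_LEN + 0x200 = 0x5e0, sending it could
 * wrap the FIFO, so we stall and wait for the FIFO to drain instead.
 */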
   5008 
   5009 static int
   5010 wm_alloc_tx_descs(struct wm_softc *sc)
   5011 {
   5012 	struct wm_txqueue *txq = sc->sc_txq;
   5013 	int error;
   5014 
   5015 	/*
   5016 	 * Allocate the control data structures, and create and load the
   5017 	 * DMA map for it.
   5018 	 *
   5019 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5020 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5021 	 * both sets within the same 4G segment.
   5022 	 */
   5023 	if (sc->sc_type < WM_T_82544) {
   5024 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5025 		txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
   5026 	} else {
   5027 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5028 		txq->txq_desc_size = sizeof(txdescs_t);
   5029 	}
   5030 
   5031 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size, PAGE_SIZE,
   5032 		    (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 1,
   5033 		    &txq->txq_desc_rseg, 0)) != 0) {
   5034 		aprint_error_dev(sc->sc_dev,
   5035 		    "unable to allocate TX control data, error = %d\n",
   5036 		    error);
   5037 		goto fail_0;
   5038 	}
   5039 
   5040 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5041 		    txq->txq_desc_rseg, txq->txq_desc_size,
   5042 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5043 		aprint_error_dev(sc->sc_dev,
   5044 		    "unable to map TX control data, error = %d\n", error);
   5045 		goto fail_1;
   5046 	}
   5047 
   5048 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
   5049 		    txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5050 		aprint_error_dev(sc->sc_dev,
   5051 		    "unable to create TX control data DMA map, error = %d\n",
   5052 		    error);
   5053 		goto fail_2;
   5054 	}
   5055 
   5056 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5057 		    txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
   5058 		aprint_error_dev(sc->sc_dev,
   5059 		    "unable to load TX control data DMA map, error = %d\n",
   5060 		    error);
   5061 		goto fail_3;
   5062 	}
   5063 
   5064 	return 0;
   5065 
   5066  fail_3:
   5067 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5068  fail_2:
   5069 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5070 	    txq->txq_desc_size);
   5071  fail_1:
   5072 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5073  fail_0:
   5074 	return error;
   5075 }
   5076 
   5077 static void
   5078 wm_free_tx_descs(struct wm_softc *sc)
   5079 {
   5080 	struct wm_txqueue *txq = sc->sc_txq;
   5081 
   5082 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5083 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5084 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5085 	    txq->txq_desc_size);
   5086 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5087 }
   5088 
   5089 static int
   5090 wm_alloc_rx_descs(struct wm_softc *sc)
   5091 {
   5092 	struct wm_rxqueue *rxq = sc->sc_rxq;
   5093 	int error;
   5094 
   5095 	/*
   5096 	 * Allocate the control data structures, and create and load the
   5097 	 * DMA map for it.
   5098 	 *
   5099 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5100 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5101 	 * both sets within the same 4G segment.
   5102 	 */
   5103 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5104 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE,
   5105 		    (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1,
   5106 		    &rxq->rxq_desc_rseg, 0)) != 0) {
   5107 		aprint_error_dev(sc->sc_dev,
   5108 		    "unable to allocate RX control data, error = %d\n",
   5109 		    error);
   5110 		goto fail_0;
   5111 	}
   5112 
   5113 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5114 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5115 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5116 		aprint_error_dev(sc->sc_dev,
   5117 		    "unable to map RX control data, error = %d\n", error);
   5118 		goto fail_1;
   5119 	}
   5120 
   5121 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5122 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5123 		aprint_error_dev(sc->sc_dev,
   5124 		    "unable to create RX control data DMA map, error = %d\n",
   5125 		    error);
   5126 		goto fail_2;
   5127 	}
   5128 
   5129 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5130 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5131 		aprint_error_dev(sc->sc_dev,
   5132 		    "unable to load RX control data DMA map, error = %d\n",
   5133 		    error);
   5134 		goto fail_3;
   5135 	}
   5136 
   5137 	return 0;
   5138 
   5139  fail_3:
   5140 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5141  fail_2:
   5142 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5143 	    rxq->rxq_desc_size);
   5144  fail_1:
   5145 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5146  fail_0:
   5147 	return error;
   5148 }
   5149 
   5150 static void
   5151 wm_free_rx_descs(struct wm_softc *sc)
   5152 {
   5153 	struct wm_rxqueue *rxq = sc->sc_rxq;
   5154 
   5155 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5156 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5157 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5158 	    rxq->rxq_desc_size);
   5159 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5160 }
   5161 
   5162 
   5163 static int
   5164 wm_alloc_tx_buffer(struct wm_softc *sc)
   5165 {
   5166 	struct wm_txqueue *txq = sc->sc_txq;
   5167 	int i, error;
   5168 
   5169 	/* Create the transmit buffer DMA maps. */
   5170 	WM_TXQUEUELEN(txq) =
   5171 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5172 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5173 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5174 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5175 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5176 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5177 			aprint_error_dev(sc->sc_dev,
   5178 			    "unable to create Tx DMA map %d, error = %d\n",
   5179 			    i, error);
   5180 			goto fail;
   5181 		}
   5182 	}
   5183 
   5184 	return 0;
   5185 
   5186  fail:
   5187 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5188 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5189 			bus_dmamap_destroy(sc->sc_dmat,
   5190 			    txq->txq_soft[i].txs_dmamap);
   5191 	}
   5192 	return error;
   5193 }
   5194 
   5195 static void
   5196 wm_free_tx_buffer(struct wm_softc *sc)
   5197 {
   5198 	struct wm_txqueue *txq = sc->sc_txq;
   5199 	int i;
   5200 
   5201 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5202 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5203 			bus_dmamap_destroy(sc->sc_dmat,
   5204 			    txq->txq_soft[i].txs_dmamap);
   5205 	}
   5206 }
   5207 
   5208 static int
   5209 wm_alloc_rx_buffer(struct wm_softc *sc)
   5210 {
   5211 	struct wm_rxqueue *rxq = sc->sc_rxq;
   5212 	int i, error;
   5213 
   5214 	/* Create the receive buffer DMA maps. */
   5215 	for (i = 0; i < WM_NRXDESC; i++) {
   5216 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5217 			    MCLBYTES, 0, 0,
   5218 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5219 			aprint_error_dev(sc->sc_dev,
   5220 			    "unable to create Rx DMA map %d error = %d\n",
   5221 			    i, error);
   5222 			goto fail;
   5223 		}
   5224 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5225 	}
   5226 
   5227 	return 0;
   5228 
   5229  fail:
   5230 	for (i = 0; i < WM_NRXDESC; i++) {
   5231 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5232 			bus_dmamap_destroy(sc->sc_dmat,
   5233 			    rxq->rxq_soft[i].rxs_dmamap);
   5234 	}
   5235 	return error;
   5236 }
   5237 
   5238 static void
   5239 wm_free_rx_buffer(struct wm_softc *sc)
   5240 {
   5241 	struct wm_rxqueue *rxq = sc->sc_rxq;
   5242 	int i;
   5243 
   5244 	for (i = 0; i < WM_NRXDESC; i++) {
   5245 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5246 			bus_dmamap_destroy(sc->sc_dmat,
   5247 			    rxq->rxq_soft[i].rxs_dmamap);
   5248 	}
   5249 }
   5250 
   5251 /*
    5252  * wm_alloc_txrx_queues:
   5253  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5254  */
   5255 static int
   5256 wm_alloc_txrx_queues(struct wm_softc *sc)
   5257 {
   5258 	int error;
   5259 
   5260 	/*
   5261 	 * For transmission
   5262 	 */
   5263 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
   5264 	    KM_SLEEP);
   5265 	if (sc->sc_txq == NULL) {
   5266 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
   5267 		error = ENOMEM;
   5268 		goto fail_0;
   5269 	}
   5270 #ifdef WM_MPSAFE
    5271 	sc->sc_txq->txq_lock =
    5272 	    mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    5273 #else
    5274 	sc->sc_txq->txq_lock = NULL;
   5275 #endif
   5276 
   5277 	error = wm_alloc_tx_descs(sc);
   5278 	if (error)
   5279 		goto fail_1;
   5280 
   5281 	error = wm_alloc_tx_buffer(sc);
   5282 	if (error)
   5283 		goto fail_2;
   5284 
   5285 	/*
    5286 	 * For receive
   5287 	 */
   5288 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
   5289 	    KM_SLEEP);
   5290 	if (sc->sc_rxq == NULL) {
   5291 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
   5292 		error = ENOMEM;
   5293 		goto fail_3;
   5294 	}
   5295 #ifdef WM_MPSAFE
    5296 	sc->sc_rxq->rxq_lock =
    5297 	    mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    5298 #else
    5299 	sc->sc_rxq->rxq_lock = NULL;
   5300 #endif
   5301 
   5302 	error = wm_alloc_rx_descs(sc);
   5303 	if (error)
   5304 		goto fail_4;
   5305 
   5306 	error = wm_alloc_rx_buffer(sc);
   5307 	if (error)
   5308 		goto fail_5;
   5309 
   5310 	return 0;
   5311 
   5312  fail_5:
   5313 	wm_free_rx_descs(sc);
   5314  fail_4:
   5315 	if (sc->sc_rxq->rxq_lock)
   5316 		mutex_obj_free(sc->sc_rxq->rxq_lock);
   5317 	kmem_free(sc->sc_rxq,
   5318 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5319  fail_3:
   5320 	wm_free_tx_buffer(sc);
   5321  fail_2:
   5322 	wm_free_tx_descs(sc);
   5323  fail_1:
   5324 	if (sc->sc_txq->txq_lock)
   5325 		mutex_obj_free(sc->sc_txq->txq_lock);
   5326 	kmem_free(sc->sc_txq,
   5327 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5328  fail_0:
   5329 	return error;
   5330 }
   5331 
   5332 /*
    5333  * wm_free_txrx_queues:
   5334  *	Free {tx,rx}descs and {tx,rx} buffers
   5335  */
   5336 static void
   5337 wm_free_txrx_queues(struct wm_softc *sc)
   5338 {
   5339 
   5340 	wm_free_rx_buffer(sc);
   5341 	wm_free_rx_descs(sc);
   5342 	if (sc->sc_rxq->rxq_lock)
   5343 		mutex_obj_free(sc->sc_rxq->rxq_lock);
   5344 	kmem_free(sc->sc_rxq,
   5345 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5346 
   5347 	wm_free_tx_buffer(sc);
   5348 	wm_free_tx_descs(sc);
   5349 	if (sc->sc_txq->txq_lock)
   5350 		mutex_obj_free(sc->sc_txq->txq_lock);
   5351 	kmem_free(sc->sc_txq,
   5352 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5353 }
   5354 
   5355 static void
   5356 wm_init_tx_descs(struct wm_softc *sc)
   5357 {
   5358 	struct wm_txqueue *txq = sc->sc_txq;
   5359 
   5360 	KASSERT(WM_TX_LOCKED(txq));
   5361 
   5362 	/* Initialize the transmit descriptor ring. */
   5363 	memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
   5364 	wm_cdtxsync(sc, 0, WM_NTXDESC(txq),
   5365 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5366 	txq->txq_free = WM_NTXDESC(txq);
   5367 	txq->txq_next = 0;
   5368 
   5369 	if (sc->sc_type < WM_T_82543) {
   5370 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5371 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5372 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
   5373 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5374 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5375 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5376 	} else {
   5377 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5378 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5379 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(txq));
   5380 		CSR_WRITE(sc, WMREG_TDH, 0);
   5381 
   5382 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5383 			/*
   5384 			 * Don't write TDT before TCTL.EN is set.
    5385 			 * See the datasheet.
   5386 			 */
   5387 			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
   5388 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5389 			    | TXDCTL_WTHRESH(0));
   5390 		else {
   5391 			/* ITR / 4 */
   5392 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5393 			if (sc->sc_type >= WM_T_82540) {
   5394 				/* should be same */
   5395 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5396 			}
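			/*
			 * Note (assumption about units): TIDV/TADV count in
			 * 1.024 us increments while ITR is programmed in
			 * 256 ns increments, so sc_itr / 4 expresses the
			 * same interval in the delay timers' units.
			 */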
   5397 
   5398 			CSR_WRITE(sc, WMREG_TDT, 0);
   5399 			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
   5400 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5401 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
   5402 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5403 		}
   5404 	}
   5405 }
   5406 
   5407 static void
   5408 wm_init_tx_buffer(struct wm_softc *sc)
   5409 {
   5410 	struct wm_txqueue *txq = sc->sc_txq;
   5411 	int i;
   5412 
   5413 	KASSERT(WM_TX_LOCKED(txq));
   5414 
   5415 	/* Initialize the transmit job descriptors. */
   5416 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5417 		txq->txq_soft[i].txs_mbuf = NULL;
   5418 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5419 	txq->txq_snext = 0;
   5420 	txq->txq_sdirty = 0;
   5421 }
   5422 
   5423 static void
   5424 wm_init_tx_queue(struct wm_softc *sc)
   5425 {
   5426 	struct wm_txqueue *txq = sc->sc_txq;
   5427 
   5428 	KASSERT(WM_TX_LOCKED(txq));
   5429 
   5430 	/*
   5431 	 * Set up some register offsets that are different between
   5432 	 * the i82542 and the i82543 and later chips.
   5433 	 */
   5434 	if (sc->sc_type < WM_T_82543) {
   5435 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5436 	} else {
   5437 		txq->txq_tdt_reg = WMREG_TDT;
   5438 	}
   5439 
   5440 	wm_init_tx_descs(sc);
   5441 	wm_init_tx_buffer(sc);
   5442 }
   5443 
   5444 static void
   5445 wm_init_rx_descs(struct wm_softc *sc)
   5446 {
   5447 	struct wm_rxqueue *rxq = sc->sc_rxq;
   5448 
   5449 	KASSERT(WM_RX_LOCKED(rxq));
   5450 
   5451 	/*
   5452 	 * Initialize the receive descriptor and receive job
   5453 	 * descriptor rings.
   5454 	 */
   5455 	if (sc->sc_type < WM_T_82543) {
   5456 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5457 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5458 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5459 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5460 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5461 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5462 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5463 
   5464 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5465 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5466 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5467 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5468 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5469 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5470 	} else {
   5471 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(rxq, 0));
   5472 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(rxq, 0));
   5473 		CSR_WRITE(sc, WMREG_RDLEN,
   5474 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5475 
   5476 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5477 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5478 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
   5479 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
   5480 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5481 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
   5482 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5483 			    | RXDCTL_WTHRESH(1));
   5484 		} else {
   5485 			CSR_WRITE(sc, WMREG_RDH, 0);
   5486 			CSR_WRITE(sc, WMREG_RDT, 0);
   5487 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
   5488 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
   5489 		}
   5490 	}
   5491 }
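/*
 * For example (assuming MCLBYTES = 2048 and SRRCTL_BSIZEPKT_SHIFT =
 * 10): 2048 is a multiple of 1 << 10, so the panic above is not
 * taken, and the SRRCTL buffer-size field is written as
 * 2048 >> 10 = 2, i.e. 2 KB per receive buffer.
 */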
   5492 
   5493 static int
   5494 wm_init_rx_buffer(struct wm_softc *sc)
   5495 {
   5496 	struct wm_rxqueue *rxq = sc->sc_rxq;
   5497 	struct wm_rxsoft *rxs;
   5498 	int error, i;
   5499 
   5500 	KASSERT(WM_RX_LOCKED(rxq));
   5501 
   5502 	for (i = 0; i < WM_NRXDESC; i++) {
   5503 		rxs = &rxq->rxq_soft[i];
   5504 		if (rxs->rxs_mbuf == NULL) {
   5505 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
   5506 				log(LOG_ERR, "%s: unable to allocate or map "
   5507 				    "rx buffer %d, error = %d\n",
   5508 				    device_xname(sc->sc_dev), i, error);
   5509 				/*
   5510 				 * XXX Should attempt to run with fewer receive
   5511 				 * XXX buffers instead of just failing.
   5512 				 */
   5513 				wm_rxdrain(sc);
   5514 				return ENOMEM;
   5515 			}
   5516 		} else {
   5517 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5518 				wm_init_rxdesc(sc, i);
   5519 			/*
   5520 			 * For 82575 and newer device, the RX descriptors
   5521 			 * must be initialized after the setting of RCTL.EN in
   5522 			 * wm_set_filter()
   5523 			 */
   5524 		}
   5525 	}
   5526 	rxq->rxq_ptr = 0;
   5527 	rxq->rxq_discard = 0;
   5528 	WM_RXCHAIN_RESET(rxq);
   5529 
   5530 	return 0;
   5531 }
   5532 
   5533 static int
   5534 wm_init_rx_queue(struct wm_softc *sc)
   5535 {
   5536 	struct wm_rxqueue *rxq = sc->sc_rxq;
   5537 
   5538 	KASSERT(WM_RX_LOCKED(rxq));
   5539 
   5540 	/*
   5541 	 * Set up some register offsets that are different between
   5542 	 * the i82542 and the i82543 and later chips.
   5543 	 */
   5544 	if (sc->sc_type < WM_T_82543) {
   5545 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5546 	} else {
   5547 		rxq->rxq_rdt_reg = WMREG_RDT;
   5548 	}
   5549 
   5550 	wm_init_rx_descs(sc);
   5551 	return wm_init_rx_buffer(sc);
   5552 }
   5553 
   5554 /*
    5555  * wm_init_txrx_queues:
   5556  *	Initialize {tx,rx}descs and {tx,rx} buffers
   5557  */
   5558 static int
   5559 wm_init_txrx_queues(struct wm_softc *sc)
   5560 {
   5561 	struct wm_txqueue *txq = sc->sc_txq;
   5562 	struct wm_rxqueue *rxq = sc->sc_rxq;
   5563 	int error;
   5564 
   5565 	WM_TX_LOCK(txq);
   5566 	wm_init_tx_queue(sc);
   5567 	WM_TX_UNLOCK(txq);
   5568 
   5569 	WM_RX_LOCK(rxq);
   5570 	error = wm_init_rx_queue(sc);
   5571 	WM_RX_UNLOCK(rxq);
   5572 
   5573 	return error;
   5574 }
   5575 
   5576 /*
   5577  * wm_start:		[ifnet interface function]
   5578  *
   5579  *	Start packet transmission on the interface.
   5580  */
   5581 static void
   5582 wm_start(struct ifnet *ifp)
   5583 {
   5584 	struct wm_softc *sc = ifp->if_softc;
   5585 	struct wm_txqueue *txq = sc->sc_txq;
   5586 
   5587 	WM_TX_LOCK(txq);
   5588 	if (!sc->sc_stopping)
   5589 		wm_start_locked(ifp);
   5590 	WM_TX_UNLOCK(txq);
   5591 }
   5592 
   5593 static void
   5594 wm_start_locked(struct ifnet *ifp)
   5595 {
   5596 	struct wm_softc *sc = ifp->if_softc;
   5597 	struct wm_txqueue *txq = sc->sc_txq;
   5598 	struct mbuf *m0;
   5599 	struct m_tag *mtag;
   5600 	struct wm_txsoft *txs;
   5601 	bus_dmamap_t dmamap;
   5602 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   5603 	bus_addr_t curaddr;
   5604 	bus_size_t seglen, curlen;
   5605 	uint32_t cksumcmd;
   5606 	uint8_t cksumfields;
   5607 
   5608 	KASSERT(WM_TX_LOCKED(txq));
   5609 
   5610 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   5611 		return;
   5612 
   5613 	/* Remember the previous number of free descriptors. */
   5614 	ofree = txq->txq_free;
   5615 
   5616 	/*
   5617 	 * Loop through the send queue, setting up transmit descriptors
   5618 	 * until we drain the queue, or use up all available transmit
   5619 	 * descriptors.
   5620 	 */
   5621 	for (;;) {
   5622 		m0 = NULL;
   5623 
   5624 		/* Get a work queue entry. */
   5625 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   5626 			wm_txeof(sc);
   5627 			if (txq->txq_sfree == 0) {
   5628 				DPRINTF(WM_DEBUG_TX,
   5629 				    ("%s: TX: no free job descriptors\n",
   5630 					device_xname(sc->sc_dev)));
   5631 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   5632 				break;
   5633 			}
   5634 		}
   5635 
   5636 		/* Grab a packet off the queue. */
   5637 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   5638 		if (m0 == NULL)
   5639 			break;
   5640 
   5641 		DPRINTF(WM_DEBUG_TX,
   5642 		    ("%s: TX: have packet to transmit: %p\n",
   5643 		    device_xname(sc->sc_dev), m0));
   5644 
   5645 		txs = &txq->txq_soft[txq->txq_snext];
   5646 		dmamap = txs->txs_dmamap;
   5647 
   5648 		use_tso = (m0->m_pkthdr.csum_flags &
   5649 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   5650 
   5651 		/*
   5652 		 * So says the Linux driver:
   5653 		 * The controller does a simple calculation to make sure
   5654 		 * there is enough room in the FIFO before initiating the
   5655 		 * DMA for each buffer.  The calc is:
   5656 		 *	4 = ceil(buffer len / MSS)
   5657 		 * To make sure we don't overrun the FIFO, adjust the max
   5658 		 * buffer len if the MSS drops.
   5659 		 */
   5660 		dmamap->dm_maxsegsz =
   5661 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   5662 		    ? m0->m_pkthdr.segsz << 2
   5663 		    : WTX_MAX_LEN;
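		/*
		 * For example (hypothetical MSS, for illustration): with
		 * segsz = 1448, dm_maxsegsz becomes 1448 << 2 = 5792, so
		 * each DMA segment spans at most four MSS-sized chunks,
		 * matching the FIFO check described above.
		 */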
   5664 
   5665 		/*
   5666 		 * Load the DMA map.  If this fails, the packet either
   5667 		 * didn't fit in the allotted number of segments, or we
   5668 		 * were short on resources.  For the too-many-segments
   5669 		 * case, we simply report an error and drop the packet,
   5670 		 * since we can't sanely copy a jumbo packet to a single
   5671 		 * buffer.
   5672 		 */
   5673 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   5674 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   5675 		if (error) {
   5676 			if (error == EFBIG) {
   5677 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5678 				log(LOG_ERR, "%s: Tx packet consumes too many "
   5679 				    "DMA segments, dropping...\n",
   5680 				    device_xname(sc->sc_dev));
   5681 				wm_dump_mbuf_chain(sc, m0);
   5682 				m_freem(m0);
   5683 				continue;
   5684 			}
   5685 			/*  Short on resources, just stop for now. */
   5686 			DPRINTF(WM_DEBUG_TX,
   5687 			    ("%s: TX: dmamap load failed: %d\n",
   5688 			    device_xname(sc->sc_dev), error));
   5689 			break;
   5690 		}
   5691 
   5692 		segs_needed = dmamap->dm_nsegs;
   5693 		if (use_tso) {
   5694 			/* For sentinel descriptor; see below. */
   5695 			segs_needed++;
   5696 		}
   5697 
   5698 		/*
   5699 		 * Ensure we have enough descriptors free to describe
   5700 		 * the packet.  Note, we always reserve one descriptor
   5701 		 * at the end of the ring due to the semantics of the
   5702 		 * TDT register, plus one more in the event we need
   5703 		 * to load offload context.
   5704 		 */
   5705 		if (segs_needed > txq->txq_free - 2) {
   5706 			/*
   5707 			 * Not enough free descriptors to transmit this
   5708 			 * packet.  We haven't committed anything yet,
   5709 			 * so just unload the DMA map, put the packet
   5710 			 * pack on the queue, and punt.  Notify the upper
    5711 			 * back on the queue, and punt.  Notify the upper
   5712 			 */
   5713 			DPRINTF(WM_DEBUG_TX,
   5714 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   5715 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
    5716 			    segs_needed, txq->txq_free - 1));
   5717 			ifp->if_flags |= IFF_OACTIVE;
   5718 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   5719 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   5720 			break;
   5721 		}
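		/*
		 * For example (hypothetical numbers): with txq_free = 10,
		 * a packet needing 9 descriptors is deferred here, since
		 * one slot must stay free for the TDT semantics and one
		 * for a possible checksum-context descriptor.
		 */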
   5722 
   5723 		/*
   5724 		 * Check for 82547 Tx FIFO bug.  We need to do this
   5725 		 * once we know we can transmit the packet, since we
   5726 		 * do some internal FIFO space accounting here.
   5727 		 */
   5728 		if (sc->sc_type == WM_T_82547 &&
   5729 		    wm_82547_txfifo_bugchk(sc, m0)) {
   5730 			DPRINTF(WM_DEBUG_TX,
   5731 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   5732 			    device_xname(sc->sc_dev)));
   5733 			ifp->if_flags |= IFF_OACTIVE;
   5734 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   5735 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   5736 			break;
   5737 		}
   5738 
   5739 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   5740 
   5741 		DPRINTF(WM_DEBUG_TX,
   5742 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   5743 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   5744 
   5745 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   5746 
   5747 		/*
   5748 		 * Store a pointer to the packet so that we can free it
   5749 		 * later.
   5750 		 *
   5751 		 * Initially, we consider the number of descriptors the
    5752 		 * packet uses to be the number of DMA segments.  This may be
   5753 		 * incremented by 1 if we do checksum offload (a descriptor
   5754 		 * is used to set the checksum context).
   5755 		 */
   5756 		txs->txs_mbuf = m0;
   5757 		txs->txs_firstdesc = txq->txq_next;
   5758 		txs->txs_ndesc = segs_needed;
   5759 
   5760 		/* Set up offload parameters for this packet. */
   5761 		if (m0->m_pkthdr.csum_flags &
   5762 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   5763 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   5764 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   5765 			if (wm_tx_offload(sc, txs, &cksumcmd,
   5766 					  &cksumfields) != 0) {
   5767 				/* Error message already displayed. */
   5768 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   5769 				continue;
   5770 			}
   5771 		} else {
   5772 			cksumcmd = 0;
   5773 			cksumfields = 0;
   5774 		}
   5775 
   5776 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   5777 
   5778 		/* Sync the DMA map. */
   5779 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   5780 		    BUS_DMASYNC_PREWRITE);
   5781 
   5782 		/* Initialize the transmit descriptor. */
   5783 		for (nexttx = txq->txq_next, seg = 0;
   5784 		     seg < dmamap->dm_nsegs; seg++) {
   5785 			for (seglen = dmamap->dm_segs[seg].ds_len,
   5786 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   5787 			     seglen != 0;
   5788 			     curaddr += curlen, seglen -= curlen,
   5789 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   5790 				curlen = seglen;
   5791 
   5792 				/*
   5793 				 * So says the Linux driver:
   5794 				 * Work around for premature descriptor
   5795 				 * write-backs in TSO mode.  Append a
   5796 				 * 4-byte sentinel descriptor.
   5797 				 */
   5798 				if (use_tso &&
   5799 				    seg == dmamap->dm_nsegs - 1 &&
   5800 				    curlen > 8)
   5801 					curlen -= 4;
   5802 
   5803 				wm_set_dma_addr(
   5804 				    &txq->txq_descs[nexttx].wtx_addr,
   5805 				    curaddr);
   5806 				txq->txq_descs[nexttx].wtx_cmdlen =
   5807 				    htole32(cksumcmd | curlen);
   5808 				txq->txq_descs[nexttx].wtx_fields.wtxu_status =
   5809 				    0;
   5810 				txq->txq_descs[nexttx].wtx_fields.wtxu_options =
   5811 				    cksumfields;
   5812 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   5813 				lasttx = nexttx;
   5814 
   5815 				DPRINTF(WM_DEBUG_TX,
   5816 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   5817 				     "len %#04zx\n",
   5818 				    device_xname(sc->sc_dev), nexttx,
   5819 				    (uint64_t)curaddr, curlen));
   5820 			}
   5821 		}
   5822 
   5823 		KASSERT(lasttx != -1);
   5824 
   5825 		/*
   5826 		 * Set up the command byte on the last descriptor of
   5827 		 * the packet.  If we're in the interrupt delay window,
   5828 		 * delay the interrupt.
   5829 		 */
   5830 		txq->txq_descs[lasttx].wtx_cmdlen |=
   5831 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   5832 
   5833 		/*
   5834 		 * If VLANs are enabled and the packet has a VLAN tag, set
   5835 		 * up the descriptor to encapsulate the packet for us.
   5836 		 *
   5837 		 * This is only valid on the last descriptor of the packet.
   5838 		 */
   5839 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   5840 			txq->txq_descs[lasttx].wtx_cmdlen |=
   5841 			    htole32(WTX_CMD_VLE);
   5842 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   5843 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   5844 		}
   5845 
   5846 		txs->txs_lastdesc = lasttx;
   5847 
   5848 		DPRINTF(WM_DEBUG_TX,
   5849 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   5850 		    device_xname(sc->sc_dev),
    5851 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   5852 
   5853 		/* Sync the descriptors we're using. */
   5854 		wm_cdtxsync(sc, txq->txq_next, txs->txs_ndesc,
   5855 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5856 
   5857 		/* Give the packet to the chip. */
   5858 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   5859 
   5860 		DPRINTF(WM_DEBUG_TX,
   5861 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   5862 
   5863 		DPRINTF(WM_DEBUG_TX,
   5864 		    ("%s: TX: finished transmitting packet, job %d\n",
    5865 		    device_xname(sc->sc_dev), txq->txq_snext));
   5866 
   5867 		/* Advance the tx pointer. */
   5868 		txq->txq_free -= txs->txs_ndesc;
   5869 		txq->txq_next = nexttx;
   5870 
   5871 		txq->txq_sfree--;
   5872 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   5873 
   5874 		/* Pass the packet to any BPF listeners. */
   5875 		bpf_mtap(ifp, m0);
   5876 	}
   5877 
   5878 	if (m0 != NULL) {
   5879 		ifp->if_flags |= IFF_OACTIVE;
   5880 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5881 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   5882 		m_freem(m0);
   5883 	}
   5884 
   5885 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   5886 		/* No more slots; notify upper layer. */
   5887 		ifp->if_flags |= IFF_OACTIVE;
   5888 	}
   5889 
   5890 	if (txq->txq_free != ofree) {
   5891 		/* Set a watchdog timer in case the chip flakes out. */
   5892 		ifp->if_timer = 5;
   5893 	}
   5894 }
   5895 
   5896 /*
   5897  * wm_nq_tx_offload:
   5898  *
   5899  *	Set up TCP/IP checksumming parameters for the
   5900  *	specified packet, for NEWQUEUE devices
   5901  */
   5902 static int
   5903 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   5904     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   5905 {
   5906 	struct wm_txqueue *txq = sc->sc_txq;
   5907 	struct mbuf *m0 = txs->txs_mbuf;
   5908 	struct m_tag *mtag;
   5909 	uint32_t vl_len, mssidx, cmdc;
   5910 	struct ether_header *eh;
   5911 	int offset, iphl;
   5912 
   5913 	/*
   5914 	 * XXX It would be nice if the mbuf pkthdr had offset
   5915 	 * fields for the protocol headers.
   5916 	 */
   5917 	*cmdlenp = 0;
   5918 	*fieldsp = 0;
   5919 
   5920 	eh = mtod(m0, struct ether_header *);
   5921 	switch (htons(eh->ether_type)) {
   5922 	case ETHERTYPE_IP:
   5923 	case ETHERTYPE_IPV6:
   5924 		offset = ETHER_HDR_LEN;
   5925 		break;
   5926 
   5927 	case ETHERTYPE_VLAN:
   5928 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5929 		break;
   5930 
   5931 	default:
   5932 		/* Don't support this protocol or encapsulation. */
   5933 		*do_csum = false;
   5934 		return 0;
   5935 	}
   5936 	*do_csum = true;
   5937 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   5938 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   5939 
   5940 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   5941 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   5942 
   5943 	if ((m0->m_pkthdr.csum_flags &
   5944 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   5945 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5946 	} else {
   5947 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5948 	}
   5949 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   5950 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   5951 
   5952 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   5953 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   5954 		     << NQTXC_VLLEN_VLAN_SHIFT);
   5955 		*cmdlenp |= NQTX_CMD_VLE;
   5956 	}
   5957 
   5958 	mssidx = 0;
   5959 
   5960 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5961 		int hlen = offset + iphl;
   5962 		int tcp_hlen;
   5963 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5964 
   5965 		if (__predict_false(m0->m_len <
   5966 				    (hlen + sizeof(struct tcphdr)))) {
   5967 			/*
   5968 			 * TCP/IP headers are not in the first mbuf; we need
   5969 			 * to do this the slow and painful way.  Let's just
   5970 			 * hope this doesn't happen very often.
   5971 			 */
   5972 			struct tcphdr th;
   5973 
   5974 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   5975 
   5976 			m_copydata(m0, hlen, sizeof(th), &th);
   5977 			if (v4) {
   5978 				struct ip ip;
   5979 
   5980 				m_copydata(m0, offset, sizeof(ip), &ip);
   5981 				ip.ip_len = 0;
   5982 				m_copyback(m0,
   5983 				    offset + offsetof(struct ip, ip_len),
   5984 				    sizeof(ip.ip_len), &ip.ip_len);
   5985 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5986 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5987 			} else {
   5988 				struct ip6_hdr ip6;
   5989 
   5990 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   5991 				ip6.ip6_plen = 0;
   5992 				m_copyback(m0,
   5993 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   5994 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   5995 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   5996 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   5997 			}
   5998 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   5999 			    sizeof(th.th_sum), &th.th_sum);
   6000 
   6001 			tcp_hlen = th.th_off << 2;
   6002 		} else {
   6003 			/*
   6004 			 * TCP/IP headers are in the first mbuf; we can do
   6005 			 * this the easy way.
   6006 			 */
   6007 			struct tcphdr *th;
   6008 
   6009 			if (v4) {
   6010 				struct ip *ip =
   6011 				    (void *)(mtod(m0, char *) + offset);
   6012 				th = (void *)(mtod(m0, char *) + hlen);
   6013 
   6014 				ip->ip_len = 0;
   6015 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6016 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6017 			} else {
   6018 				struct ip6_hdr *ip6 =
   6019 				    (void *)(mtod(m0, char *) + offset);
   6020 				th = (void *)(mtod(m0, char *) + hlen);
   6021 
   6022 				ip6->ip6_plen = 0;
   6023 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6024 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6025 			}
   6026 			tcp_hlen = th->th_off << 2;
   6027 		}
   6028 		hlen += tcp_hlen;
   6029 		*cmdlenp |= NQTX_CMD_TSE;
   6030 
   6031 		if (v4) {
   6032 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6033 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6034 		} else {
   6035 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6036 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6037 		}
   6038 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6039 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6040 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6041 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6042 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6043 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6044 	} else {
   6045 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6046 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6047 	}
   6048 
   6049 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6050 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6051 		cmdc |= NQTXC_CMD_IP4;
   6052 	}
   6053 
   6054 	if (m0->m_pkthdr.csum_flags &
   6055 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6056 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6057 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6058 			cmdc |= NQTXC_CMD_TCP;
   6059 		} else {
   6060 			cmdc |= NQTXC_CMD_UDP;
   6061 		}
   6062 		cmdc |= NQTXC_CMD_IP4;
   6063 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6064 	}
   6065 	if (m0->m_pkthdr.csum_flags &
   6066 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6067 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6068 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6069 			cmdc |= NQTXC_CMD_TCP;
   6070 		} else {
   6071 			cmdc |= NQTXC_CMD_UDP;
   6072 		}
   6073 		cmdc |= NQTXC_CMD_IP6;
   6074 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6075 	}
   6076 
   6077 	/* Fill in the context descriptor. */
   6078 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6079 	    htole32(vl_len);
   6080 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6081 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6082 	    htole32(cmdc);
   6083 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6084 	    htole32(mssidx);
   6085 	wm_cdtxsync(sc, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6086 	DPRINTF(WM_DEBUG_TX,
   6087 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
    6088 	    txq->txq_next, 0, vl_len));
   6089 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6090 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6091 	txs->txs_ndesc++;
   6092 	return 0;
   6093 }
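/*
 * For example (hypothetical packet, for illustration): for an untagged
 * IPv4/TCP frame, vl_len packs MACLEN = 14 and IPLEN = 20; with TSO at
 * segsz = 1448 and a 20-byte TCP header, mssidx packs MSS = 1448 and
 * L4LEN = 20 into the same advanced context descriptor.
 */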
   6094 
   6095 /*
   6096  * wm_nq_start:		[ifnet interface function]
   6097  *
   6098  *	Start packet transmission on the interface for NEWQUEUE devices
   6099  */
   6100 static void
   6101 wm_nq_start(struct ifnet *ifp)
   6102 {
   6103 	struct wm_softc *sc = ifp->if_softc;
   6104 	struct wm_txqueue *txq = sc->sc_txq;
   6105 
   6106 	WM_TX_LOCK(txq);
   6107 	if (!sc->sc_stopping)
   6108 		wm_nq_start_locked(ifp);
   6109 	WM_TX_UNLOCK(txq);
   6110 }
   6111 
   6112 static void
   6113 wm_nq_start_locked(struct ifnet *ifp)
   6114 {
   6115 	struct wm_softc *sc = ifp->if_softc;
   6116 	struct wm_txqueue *txq = sc->sc_txq;
   6117 	struct mbuf *m0;
   6118 	struct m_tag *mtag;
   6119 	struct wm_txsoft *txs;
   6120 	bus_dmamap_t dmamap;
   6121 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6122 	bool do_csum, sent;
   6123 
   6124 	KASSERT(WM_TX_LOCKED(txq));
   6125 
   6126 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   6127 		return;
   6128 
   6129 	sent = false;
   6130 
   6131 	/*
   6132 	 * Loop through the send queue, setting up transmit descriptors
   6133 	 * until we drain the queue, or use up all available transmit
   6134 	 * descriptors.
   6135 	 */
   6136 	for (;;) {
   6137 		m0 = NULL;
   6138 
   6139 		/* Get a work queue entry. */
   6140 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6141 			wm_txeof(sc);
   6142 			if (txq->txq_sfree == 0) {
   6143 				DPRINTF(WM_DEBUG_TX,
   6144 				    ("%s: TX: no free job descriptors\n",
   6145 					device_xname(sc->sc_dev)));
   6146 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6147 				break;
   6148 			}
   6149 		}
   6150 
   6151 		/* Grab a packet off the queue. */
   6152 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6153 		if (m0 == NULL)
   6154 			break;
   6155 
   6156 		DPRINTF(WM_DEBUG_TX,
   6157 		    ("%s: TX: have packet to transmit: %p\n",
   6158 		    device_xname(sc->sc_dev), m0));
   6159 
   6160 		txs = &txq->txq_soft[txq->txq_snext];
   6161 		dmamap = txs->txs_dmamap;
   6162 
   6163 		/*
   6164 		 * Load the DMA map.  If this fails, the packet either
   6165 		 * didn't fit in the allotted number of segments, or we
   6166 		 * were short on resources.  For the too-many-segments
   6167 		 * case, we simply report an error and drop the packet,
   6168 		 * since we can't sanely copy a jumbo packet to a single
   6169 		 * buffer.
   6170 		 */
   6171 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6172 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   6173 		if (error) {
   6174 			if (error == EFBIG) {
   6175 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6176 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6177 				    "DMA segments, dropping...\n",
   6178 				    device_xname(sc->sc_dev));
   6179 				wm_dump_mbuf_chain(sc, m0);
   6180 				m_freem(m0);
   6181 				continue;
   6182 			}
   6183 			/* Short on resources, just stop for now. */
   6184 			DPRINTF(WM_DEBUG_TX,
   6185 			    ("%s: TX: dmamap load failed: %d\n",
   6186 			    device_xname(sc->sc_dev), error));
   6187 			break;
   6188 		}
   6189 
   6190 		segs_needed = dmamap->dm_nsegs;
   6191 
   6192 		/*
   6193 		 * Ensure we have enough descriptors free to describe
   6194 		 * the packet.  Note, we always reserve one descriptor
   6195 		 * at the end of the ring due to the semantics of the
   6196 		 * TDT register, plus one more in the event we need
   6197 		 * to load offload context.
   6198 		 */
   6199 		if (segs_needed > txq->txq_free - 2) {
   6200 			/*
   6201 			 * Not enough free descriptors to transmit this
   6202 			 * packet.  We haven't committed anything yet,
   6203 			 * so just unload the DMA map, put the packet
    6204 			 * back on the queue, and punt.  Notify the upper
   6205 			 * layer that there are no more slots left.
   6206 			 */
   6207 			DPRINTF(WM_DEBUG_TX,
   6208 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6209 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
    6210 			    segs_needed, txq->txq_free - 1));
   6211 			ifp->if_flags |= IFF_OACTIVE;
   6212 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6213 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6214 			break;
   6215 		}
   6216 
   6217 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6218 
   6219 		DPRINTF(WM_DEBUG_TX,
   6220 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6221 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6222 
   6223 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6224 
   6225 		/*
   6226 		 * Store a pointer to the packet so that we can free it
   6227 		 * later.
   6228 		 *
   6229 		 * Initially, we consider the number of descriptors the
    6230 		 * packet uses to be the number of DMA segments.  This may be
   6231 		 * incremented by 1 if we do checksum offload (a descriptor
   6232 		 * is used to set the checksum context).
   6233 		 */
   6234 		txs->txs_mbuf = m0;
   6235 		txs->txs_firstdesc = txq->txq_next;
   6236 		txs->txs_ndesc = segs_needed;
   6237 
   6238 		/* Set up offload parameters for this packet. */
   6239 		uint32_t cmdlen, fields, dcmdlen;
   6240 		if (m0->m_pkthdr.csum_flags &
   6241 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   6242 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   6243 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   6244 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   6245 			    &do_csum) != 0) {
   6246 				/* Error message already displayed. */
   6247 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6248 				continue;
   6249 			}
   6250 		} else {
   6251 			do_csum = false;
   6252 			cmdlen = 0;
   6253 			fields = 0;
   6254 		}
   6255 
   6256 		/* Sync the DMA map. */
   6257 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6258 		    BUS_DMASYNC_PREWRITE);
   6259 
   6260 		/* Initialize the first transmit descriptor. */
   6261 		nexttx = txq->txq_next;
   6262 		if (!do_csum) {
    6263 			/* Set up a legacy descriptor. */
   6264 			wm_set_dma_addr(
   6265 			    &txq->txq_descs[nexttx].wtx_addr,
   6266 			    dmamap->dm_segs[0].ds_addr);
   6267 			txq->txq_descs[nexttx].wtx_cmdlen =
   6268 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6269 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6270 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6271 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6272 			    NULL) {
   6273 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6274 				    htole32(WTX_CMD_VLE);
   6275 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6276 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6277 			} else {
    6278 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6279 			}
   6280 			dcmdlen = 0;
   6281 		} else {
    6282 			/* Set up an advanced data descriptor. */
   6283 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6284 			    htole64(dmamap->dm_segs[0].ds_addr);
   6285 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6286 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6287 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6288 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6289 			    htole32(fields);
   6290 			DPRINTF(WM_DEBUG_TX,
   6291 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6292 			    device_xname(sc->sc_dev), nexttx,
   6293 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6294 			DPRINTF(WM_DEBUG_TX,
   6295 			    ("\t 0x%08x%08x\n", fields,
   6296 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6297 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6298 		}
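		/*
		 * dcmdlen carries the per-descriptor type bits for the
		 * remaining segments: 0 for the legacy layout,
		 * NQTX_DTYP_D | NQTX_CMD_DEXT for the advanced layout.
		 */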
   6299 
   6300 		lasttx = nexttx;
   6301 		nexttx = WM_NEXTTX(txq, nexttx);
   6302 		/*
    6303 		 * Fill in the next descriptors.  The legacy and advanced
    6304 		 * formats are the same here.
   6305 		 */
   6306 		for (seg = 1; seg < dmamap->dm_nsegs;
   6307 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6308 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6309 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6310 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6311 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6312 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6313 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6314 			lasttx = nexttx;
   6315 
   6316 			DPRINTF(WM_DEBUG_TX,
   6317 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6318 			     "len %#04zx\n",
   6319 			    device_xname(sc->sc_dev), nexttx,
   6320 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6321 			    dmamap->dm_segs[seg].ds_len));
   6322 		}
   6323 
   6324 		KASSERT(lasttx != -1);
   6325 
   6326 		/*
   6327 		 * Set up the command byte on the last descriptor of
   6328 		 * the packet.  If we're in the interrupt delay window,
   6329 		 * delay the interrupt.
   6330 		 */
   6331 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6332 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6333 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6334 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
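         		/*
         		 * WTX_CMD_RS requests descriptor status write-back: the
         		 * chip sets the DD bit once the descriptor is done, and
         		 * wm_txeof() polls that bit on the last descriptor to
         		 * detect completion of the whole packet.
         		 */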
   6335 
   6336 		txs->txs_lastdesc = lasttx;
   6337 
   6338 		DPRINTF(WM_DEBUG_TX,
   6339 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6340 		    device_xname(sc->sc_dev),
    6341 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6342 
   6343 		/* Sync the descriptors we're using. */
   6344 		wm_cdtxsync(sc, txq->txq_next, txs->txs_ndesc,
   6345 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   6346 
   6347 		/* Give the packet to the chip. */
   6348 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6349 		sent = true;
   6350 
   6351 		DPRINTF(WM_DEBUG_TX,
   6352 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6353 
   6354 		DPRINTF(WM_DEBUG_TX,
   6355 		    ("%s: TX: finished transmitting packet, job %d\n",
    6356 		    device_xname(sc->sc_dev), txq->txq_snext));
   6357 
   6358 		/* Advance the tx pointer. */
   6359 		txq->txq_free -= txs->txs_ndesc;
   6360 		txq->txq_next = nexttx;
   6361 
   6362 		txq->txq_sfree--;
   6363 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6364 
   6365 		/* Pass the packet to any BPF listeners. */
   6366 		bpf_mtap(ifp, m0);
   6367 	}
   6368 
   6369 	if (m0 != NULL) {
   6370 		ifp->if_flags |= IFF_OACTIVE;
   6371 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6372 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   6373 		m_freem(m0);
   6374 	}
   6375 
   6376 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6377 		/* No more slots; notify upper layer. */
   6378 		ifp->if_flags |= IFF_OACTIVE;
   6379 	}
   6380 
   6381 	if (sent) {
   6382 		/* Set a watchdog timer in case the chip flakes out. */
   6383 		ifp->if_timer = 5;
   6384 	}
   6385 }
   6386 
   6387 /* Interrupt */
   6388 
   6389 /*
   6390  * wm_txeof:
   6391  *
   6392  *	Helper; handle transmit interrupts.
   6393  */
   6394 static int
   6395 wm_txeof(struct wm_softc *sc)
   6396 {
   6397 	struct wm_txqueue *txq = sc->sc_txq;
   6398 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6399 	struct wm_txsoft *txs;
   6400 	bool processed = false;
   6401 	int count = 0;
   6402 	int i;
   6403 	uint8_t status;
   6404 
   6405 	if (sc->sc_stopping)
   6406 		return 0;
   6407 
   6408 	ifp->if_flags &= ~IFF_OACTIVE;
   6409 
   6410 	/*
   6411 	 * Go through the Tx list and free mbufs for those
   6412 	 * frames which have been transmitted.
   6413 	 */
   6414 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   6415 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   6416 		txs = &txq->txq_soft[i];
   6417 
   6418 		DPRINTF(WM_DEBUG_TX,
   6419 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   6420 
   6421 		wm_cdtxsync(sc, txs->txs_firstdesc, txs->txs_ndesc,
   6422 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6423 
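         		/*
         		 * RS is set only on the last descriptor of each packet,
         		 * so only its DD bit tells us whether the chip has
         		 * consumed the whole packet.
         		 */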
   6424 		status =
   6425 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   6426 		if ((status & WTX_ST_DD) == 0) {
   6427 			wm_cdtxsync(sc, txs->txs_lastdesc, 1,
   6428 			    BUS_DMASYNC_PREREAD);
   6429 			break;
   6430 		}
   6431 
   6432 		processed = true;
   6433 		count++;
   6434 		DPRINTF(WM_DEBUG_TX,
   6435 		    ("%s: TX: job %d done: descs %d..%d\n",
   6436 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   6437 		    txs->txs_lastdesc));
   6438 
   6439 		/*
   6440 		 * XXX We should probably be using the statistics
   6441 		 * XXX registers, but I don't know if they exist
   6442 		 * XXX on chips before the i82544.
   6443 		 */
   6444 
   6445 #ifdef WM_EVENT_COUNTERS
   6446 		if (status & WTX_ST_TU)
   6447 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   6448 #endif /* WM_EVENT_COUNTERS */
   6449 
   6450 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   6451 			ifp->if_oerrors++;
   6452 			if (status & WTX_ST_LC)
   6453 				log(LOG_WARNING, "%s: late collision\n",
   6454 				    device_xname(sc->sc_dev));
   6455 			else if (status & WTX_ST_EC) {
   6456 				ifp->if_collisions += 16;
   6457 				log(LOG_WARNING, "%s: excessive collisions\n",
   6458 				    device_xname(sc->sc_dev));
   6459 			}
   6460 		} else
   6461 			ifp->if_opackets++;
   6462 
   6463 		txq->txq_free += txs->txs_ndesc;
   6464 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   6465 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   6466 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6467 		m_freem(txs->txs_mbuf);
   6468 		txs->txs_mbuf = NULL;
   6469 	}
   6470 
   6471 	/* Update the dirty transmit buffer pointer. */
   6472 	txq->txq_sdirty = i;
   6473 	DPRINTF(WM_DEBUG_TX,
   6474 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   6475 
   6476 	if (count != 0)
   6477 		rnd_add_uint32(&sc->rnd_source, count);
   6478 
   6479 	/*
   6480 	 * If there are no more pending transmissions, cancel the watchdog
   6481 	 * timer.
   6482 	 */
   6483 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   6484 		ifp->if_timer = 0;
   6485 
   6486 	return processed;
   6487 }
   6488 
   6489 /*
   6490  * wm_rxeof:
   6491  *
   6492  *	Helper; handle receive interrupts.
   6493  */
   6494 static void
   6495 wm_rxeof(struct wm_softc *sc)
   6496 {
   6497 	struct wm_rxqueue *rxq = sc->sc_rxq;
   6498 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6499 	struct wm_rxsoft *rxs;
   6500 	struct mbuf *m;
   6501 	int i, len;
   6502 	int count = 0;
   6503 	uint8_t status, errors;
   6504 	uint16_t vlantag;
   6505 
   6506 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   6507 		rxs = &rxq->rxq_soft[i];
   6508 
   6509 		DPRINTF(WM_DEBUG_RX,
   6510 		    ("%s: RX: checking descriptor %d\n",
   6511 		    device_xname(sc->sc_dev), i));
   6512 
   6513 		wm_cdrxsync(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6514 
   6515 		status = rxq->rxq_descs[i].wrx_status;
   6516 		errors = rxq->rxq_descs[i].wrx_errors;
   6517 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   6518 		vlantag = rxq->rxq_descs[i].wrx_special;
   6519 
   6520 		if ((status & WRX_ST_DD) == 0) {
   6521 			/* We have processed all of the receive descriptors. */
   6522 			wm_cdrxsync(sc, i, BUS_DMASYNC_PREREAD);
   6523 			break;
   6524 		}
   6525 
   6526 		count++;
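         		/*
         		 * rxq_discard is set when an earlier fragment of a
         		 * multi-descriptor packet had to be dropped; keep
         		 * discarding descriptors until EOP, then resume.
         		 */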
   6527 		if (__predict_false(rxq->rxq_discard)) {
   6528 			DPRINTF(WM_DEBUG_RX,
   6529 			    ("%s: RX: discarding contents of descriptor %d\n",
   6530 			    device_xname(sc->sc_dev), i));
   6531 			wm_init_rxdesc(sc, i);
   6532 			if (status & WRX_ST_EOP) {
   6533 				/* Reset our state. */
   6534 				DPRINTF(WM_DEBUG_RX,
   6535 				    ("%s: RX: resetting rxdiscard -> 0\n",
   6536 				    device_xname(sc->sc_dev)));
   6537 				rxq->rxq_discard = 0;
   6538 			}
   6539 			continue;
   6540 		}
   6541 
   6542 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6543 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   6544 
   6545 		m = rxs->rxs_mbuf;
   6546 
   6547 		/*
   6548 		 * Add a new receive buffer to the ring, unless of
   6549 		 * course the length is zero. Treat the latter as a
   6550 		 * failed mapping.
   6551 		 */
   6552 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
   6553 			/*
   6554 			 * Failed, throw away what we've done so
   6555 			 * far, and discard the rest of the packet.
   6556 			 */
   6557 			ifp->if_ierrors++;
   6558 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6559 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   6560 			wm_init_rxdesc(sc, i);
   6561 			if ((status & WRX_ST_EOP) == 0)
   6562 				rxq->rxq_discard = 1;
   6563 			if (rxq->rxq_head != NULL)
   6564 				m_freem(rxq->rxq_head);
   6565 			WM_RXCHAIN_RESET(rxq);
   6566 			DPRINTF(WM_DEBUG_RX,
   6567 			    ("%s: RX: Rx buffer allocation failed, "
   6568 			    "dropping packet%s\n", device_xname(sc->sc_dev),
    6569 			    rxq->rxq_discard ? " (discard)" : ""));
   6570 			continue;
   6571 		}
   6572 
   6573 		m->m_len = len;
   6574 		rxq->rxq_len += len;
   6575 		DPRINTF(WM_DEBUG_RX,
   6576 		    ("%s: RX: buffer at %p len %d\n",
   6577 		    device_xname(sc->sc_dev), m->m_data, len));
   6578 
   6579 		/* If this is not the end of the packet, keep looking. */
   6580 		if ((status & WRX_ST_EOP) == 0) {
   6581 			WM_RXCHAIN_LINK(rxq, m);
   6582 			DPRINTF(WM_DEBUG_RX,
   6583 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
    6584 			    device_xname(sc->sc_dev), rxq->rxq_len));
   6585 			continue;
   6586 		}
   6587 
    6588 		/*
    6589 		 * Okay, we have the entire packet now.  The chip
    6590 		 * includes the FCS except on I350, I354 and I21[01]
    6591 		 * (not all chips can be configured to strip it), so
    6592 		 * we need to trim it, possibly shortening the previous
    6593 		 * mbuf in the chain if the current one is too short.
    6594 		 * Due to an erratum, RCTL_SECRC is always set on the
    6595 		 * I350, so the FCS is already stripped there and we
    6596 		 * must not trim it.
    6597 		 */
   6598 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   6599 		    && (sc->sc_type != WM_T_I210)
   6600 		    && (sc->sc_type != WM_T_I211)) {
   6601 			if (m->m_len < ETHER_CRC_LEN) {
   6602 				rxq->rxq_tail->m_len
   6603 				    -= (ETHER_CRC_LEN - m->m_len);
   6604 				m->m_len = 0;
   6605 			} else
   6606 				m->m_len -= ETHER_CRC_LEN;
   6607 			len = rxq->rxq_len - ETHER_CRC_LEN;
   6608 		} else
   6609 			len = rxq->rxq_len;
   6610 
   6611 		WM_RXCHAIN_LINK(rxq, m);
   6612 
   6613 		*rxq->rxq_tailp = NULL;
   6614 		m = rxq->rxq_head;
   6615 
   6616 		WM_RXCHAIN_RESET(rxq);
   6617 
   6618 		DPRINTF(WM_DEBUG_RX,
   6619 		    ("%s: RX: have entire packet, len -> %d\n",
   6620 		    device_xname(sc->sc_dev), len));
   6621 
   6622 		/* If an error occurred, update stats and drop the packet. */
   6623 		if (errors &
   6624 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   6625 			if (errors & WRX_ER_SE)
   6626 				log(LOG_WARNING, "%s: symbol error\n",
   6627 				    device_xname(sc->sc_dev));
   6628 			else if (errors & WRX_ER_SEQ)
   6629 				log(LOG_WARNING, "%s: receive sequence error\n",
   6630 				    device_xname(sc->sc_dev));
   6631 			else if (errors & WRX_ER_CE)
   6632 				log(LOG_WARNING, "%s: CRC error\n",
   6633 				    device_xname(sc->sc_dev));
   6634 			m_freem(m);
   6635 			continue;
   6636 		}
   6637 
   6638 		/* No errors.  Receive the packet. */
   6639 		m->m_pkthdr.rcvif = ifp;
   6640 		m->m_pkthdr.len = len;
   6641 
   6642 		/*
   6643 		 * If VLANs are enabled, VLAN packets have been unwrapped
   6644 		 * for us.  Associate the tag with the packet.
   6645 		 */
   6646 		/* XXXX should check for i350 and i354 */
   6647 		if ((status & WRX_ST_VP) != 0) {
   6648 			VLAN_INPUT_TAG(ifp, m,
   6649 			    le16toh(vlantag),
   6650 			    continue);
   6651 		}
   6652 
   6653 		/* Set up checksum info for this packet. */
   6654 		if ((status & WRX_ST_IXSM) == 0) {
   6655 			if (status & WRX_ST_IPCS) {
   6656 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   6657 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   6658 				if (errors & WRX_ER_IPE)
   6659 					m->m_pkthdr.csum_flags |=
   6660 					    M_CSUM_IPv4_BAD;
   6661 			}
   6662 			if (status & WRX_ST_TCPCS) {
   6663 				/*
   6664 				 * Note: we don't know if this was TCP or UDP,
   6665 				 * so we just set both bits, and expect the
   6666 				 * upper layers to deal.
   6667 				 */
   6668 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   6669 				m->m_pkthdr.csum_flags |=
   6670 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6671 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   6672 				if (errors & WRX_ER_TCPE)
   6673 					m->m_pkthdr.csum_flags |=
   6674 					    M_CSUM_TCP_UDP_BAD;
   6675 			}
   6676 		}
   6677 
   6678 		ifp->if_ipackets++;
   6679 
   6680 		WM_RX_UNLOCK(rxq);
   6681 
   6682 		/* Pass this up to any BPF listeners. */
   6683 		bpf_mtap(ifp, m);
   6684 
   6685 		/* Pass it on. */
   6686 		(*ifp->if_input)(ifp, m);
   6687 
   6688 		WM_RX_LOCK(rxq);
   6689 
   6690 		if (sc->sc_stopping)
   6691 			break;
   6692 	}
   6693 
   6694 	/* Update the receive pointer. */
   6695 	rxq->rxq_ptr = i;
   6696 	if (count != 0)
   6697 		rnd_add_uint32(&sc->rnd_source, count);
   6698 
   6699 	DPRINTF(WM_DEBUG_RX,
   6700 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   6701 }
   6702 
   6703 /*
   6704  * wm_linkintr_gmii:
   6705  *
   6706  *	Helper; handle link interrupts for GMII.
   6707  */
   6708 static void
   6709 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   6710 {
   6711 
   6712 	KASSERT(WM_CORE_LOCKED(sc));
   6713 
   6714 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   6715 		__func__));
   6716 
   6717 	if (icr & ICR_LSC) {
   6718 		DPRINTF(WM_DEBUG_LINK,
   6719 		    ("%s: LINK: LSC -> mii_pollstat\n",
   6720 			device_xname(sc->sc_dev)));
   6721 		mii_pollstat(&sc->sc_mii);
   6722 		if (sc->sc_type == WM_T_82543) {
   6723 			int miistatus, active;
   6724 
   6725 			/*
   6726 			 * With 82543, we need to force speed and
   6727 			 * duplex on the MAC equal to what the PHY
   6728 			 * speed and duplex configuration is.
   6729 			 */
   6730 			miistatus = sc->sc_mii.mii_media_status;
   6731 
   6732 			if (miistatus & IFM_ACTIVE) {
   6733 				active = sc->sc_mii.mii_media_active;
   6734 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   6735 				switch (IFM_SUBTYPE(active)) {
   6736 				case IFM_10_T:
   6737 					sc->sc_ctrl |= CTRL_SPEED_10;
   6738 					break;
   6739 				case IFM_100_TX:
   6740 					sc->sc_ctrl |= CTRL_SPEED_100;
   6741 					break;
   6742 				case IFM_1000_T:
   6743 					sc->sc_ctrl |= CTRL_SPEED_1000;
   6744 					break;
   6745 				default:
   6746 					/*
   6747 					 * fiber?
    6748 					 * Should not enter here.
   6749 					 */
   6750 					printf("unknown media (%x)\n",
   6751 					    active);
   6752 					break;
   6753 				}
   6754 				if (active & IFM_FDX)
   6755 					sc->sc_ctrl |= CTRL_FD;
   6756 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6757 			}
   6758 		} else if ((sc->sc_type == WM_T_ICH8)
   6759 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   6760 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   6761 		} else if (sc->sc_type == WM_T_PCH) {
   6762 			wm_k1_gig_workaround_hv(sc,
   6763 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   6764 		}
   6765 
   6766 		if ((sc->sc_phytype == WMPHY_82578)
   6767 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   6768 			== IFM_1000_T)) {
   6769 
   6770 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   6771 				delay(200*1000); /* XXX too big */
   6772 
   6773 				/* Link stall fix for link up */
   6774 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   6775 				    HV_MUX_DATA_CTRL,
   6776 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   6777 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   6778 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   6779 				    HV_MUX_DATA_CTRL,
   6780 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   6781 			}
   6782 		}
   6783 	} else if (icr & ICR_RXSEQ) {
   6784 		DPRINTF(WM_DEBUG_LINK,
   6785 		    ("%s: LINK Receive sequence error\n",
   6786 			device_xname(sc->sc_dev)));
   6787 	}
   6788 }
   6789 
   6790 /*
   6791  * wm_linkintr_tbi:
   6792  *
   6793  *	Helper; handle link interrupts for TBI mode.
   6794  */
   6795 static void
   6796 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   6797 {
   6798 	uint32_t status;
   6799 
   6800 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   6801 		__func__));
   6802 
   6803 	status = CSR_READ(sc, WMREG_STATUS);
   6804 	if (icr & ICR_LSC) {
   6805 		if (status & STATUS_LU) {
   6806 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   6807 			    device_xname(sc->sc_dev),
   6808 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   6809 			/*
   6810 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   6811 			 * so we should update sc->sc_ctrl
   6812 			 */
   6813 
   6814 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   6815 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   6816 			sc->sc_fcrtl &= ~FCRTL_XONE;
   6817 			if (status & STATUS_FD)
   6818 				sc->sc_tctl |=
   6819 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6820 			else
   6821 				sc->sc_tctl |=
   6822 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   6823 			if (sc->sc_ctrl & CTRL_TFCE)
   6824 				sc->sc_fcrtl |= FCRTL_XONE;
   6825 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6826 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   6827 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   6828 				      sc->sc_fcrtl);
   6829 			sc->sc_tbi_linkup = 1;
   6830 		} else {
   6831 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   6832 			    device_xname(sc->sc_dev)));
   6833 			sc->sc_tbi_linkup = 0;
   6834 		}
   6835 		/* Update LED */
   6836 		wm_tbi_serdes_set_linkled(sc);
   6837 	} else if (icr & ICR_RXSEQ) {
   6838 		DPRINTF(WM_DEBUG_LINK,
   6839 		    ("%s: LINK: Receive sequence error\n",
   6840 		    device_xname(sc->sc_dev)));
   6841 	}
   6842 }
   6843 
   6844 /*
   6845  * wm_linkintr_serdes:
   6846  *
    6847  *	Helper; handle link interrupts for SERDES mode.
   6848  */
   6849 static void
   6850 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   6851 {
   6852 	struct mii_data *mii = &sc->sc_mii;
   6853 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6854 	uint32_t pcs_adv, pcs_lpab, reg;
   6855 
   6856 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   6857 		__func__));
   6858 
   6859 	if (icr & ICR_LSC) {
   6860 		/* Check PCS */
   6861 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   6862 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   6863 			mii->mii_media_status |= IFM_ACTIVE;
   6864 			sc->sc_tbi_linkup = 1;
   6865 		} else {
    6866 			mii->mii_media_active |= IFM_NONE;
   6867 			sc->sc_tbi_linkup = 0;
   6868 			wm_tbi_serdes_set_linkled(sc);
   6869 			return;
   6870 		}
   6871 		mii->mii_media_active |= IFM_1000_SX;
   6872 		if ((reg & PCS_LSTS_FDX) != 0)
   6873 			mii->mii_media_active |= IFM_FDX;
   6874 		else
   6875 			mii->mii_media_active |= IFM_HDX;
   6876 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   6877 			/* Check flow */
   6878 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   6879 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   6880 				DPRINTF(WM_DEBUG_LINK,
   6881 				    ("XXX LINKOK but not ACOMP\n"));
   6882 				return;
   6883 			}
   6884 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   6885 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   6886 			DPRINTF(WM_DEBUG_LINK,
   6887 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
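         			/*
         			 * Resolve flow control per 802.3 Annex 28B: if both
         			 * sides advertise symmetric pause, enable both
         			 * directions; otherwise the asymmetric-pause bits
         			 * decide which single direction (if any) is enabled.
         			 */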
   6888 			if ((pcs_adv & TXCW_SYM_PAUSE)
   6889 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   6890 				mii->mii_media_active |= IFM_FLOW
   6891 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   6892 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   6893 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   6894 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   6895 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   6896 				mii->mii_media_active |= IFM_FLOW
   6897 				    | IFM_ETH_TXPAUSE;
   6898 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   6899 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   6900 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   6901 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   6902 				mii->mii_media_active |= IFM_FLOW
   6903 				    | IFM_ETH_RXPAUSE;
   6904 		}
   6905 		/* Update LED */
   6906 		wm_tbi_serdes_set_linkled(sc);
   6907 	} else {
   6908 		DPRINTF(WM_DEBUG_LINK,
   6909 		    ("%s: LINK: Receive sequence error\n",
   6910 		    device_xname(sc->sc_dev)));
   6911 	}
   6912 }
   6913 
   6914 /*
   6915  * wm_linkintr:
   6916  *
   6917  *	Helper; handle link interrupts.
   6918  */
   6919 static void
   6920 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   6921 {
   6922 
   6923 	KASSERT(WM_CORE_LOCKED(sc));
   6924 
   6925 	if (sc->sc_flags & WM_F_HAS_MII)
   6926 		wm_linkintr_gmii(sc, icr);
   6927 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   6928 	    && (sc->sc_type >= WM_T_82575))
   6929 		wm_linkintr_serdes(sc, icr);
   6930 	else
   6931 		wm_linkintr_tbi(sc, icr);
   6932 }
   6933 
   6934 /*
   6935  * wm_intr_legacy:
   6936  *
   6937  *	Interrupt service routine for INTx and MSI.
   6938  */
   6939 static int
   6940 wm_intr_legacy(void *arg)
   6941 {
   6942 	struct wm_softc *sc = arg;
   6943 	struct wm_txqueue *txq = sc->sc_txq;
   6944 	struct wm_rxqueue *rxq = sc->sc_rxq;
   6945 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6946 	uint32_t icr, rndval = 0;
   6947 	int handled = 0;
   6948 
   6949 	DPRINTF(WM_DEBUG_TX,
   6950 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   6951 	while (1 /* CONSTCOND */) {
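         		/* Reading ICR acknowledges (clears) the asserted causes. */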
   6952 		icr = CSR_READ(sc, WMREG_ICR);
   6953 		if ((icr & sc->sc_icr) == 0)
   6954 			break;
   6955 		if (rndval == 0)
   6956 			rndval = icr;
   6957 
   6958 		WM_RX_LOCK(rxq);
   6959 
   6960 		if (sc->sc_stopping) {
   6961 			WM_RX_UNLOCK(rxq);
   6962 			break;
   6963 		}
   6964 
   6965 		handled = 1;
   6966 
   6967 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   6968 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   6969 			DPRINTF(WM_DEBUG_RX,
   6970 			    ("%s: RX: got Rx intr 0x%08x\n",
   6971 			    device_xname(sc->sc_dev),
   6972 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   6973 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   6974 		}
   6975 #endif
   6976 		wm_rxeof(sc);
   6977 
   6978 		WM_RX_UNLOCK(rxq);
   6979 		WM_TX_LOCK(txq);
   6980 
   6981 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   6982 		if (icr & ICR_TXDW) {
   6983 			DPRINTF(WM_DEBUG_TX,
   6984 			    ("%s: TX: got TXDW interrupt\n",
   6985 			    device_xname(sc->sc_dev)));
   6986 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   6987 		}
   6988 #endif
   6989 		wm_txeof(sc);
   6990 
   6991 		WM_TX_UNLOCK(txq);
   6992 		WM_CORE_LOCK(sc);
   6993 
   6994 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
   6995 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   6996 			wm_linkintr(sc, icr);
   6997 		}
   6998 
   6999 		WM_CORE_UNLOCK(sc);
   7000 
   7001 		if (icr & ICR_RXO) {
   7002 #if defined(WM_DEBUG)
   7003 			log(LOG_WARNING, "%s: Receive overrun\n",
   7004 			    device_xname(sc->sc_dev));
   7005 #endif /* defined(WM_DEBUG) */
   7006 		}
   7007 	}
   7008 
   7009 	rnd_add_uint32(&sc->rnd_source, rndval);
   7010 
   7011 	if (handled) {
   7012 		/* Try to get more packets going. */
   7013 		ifp->if_start(ifp);
   7014 	}
   7015 
   7016 	return handled;
   7017 }
   7018 
   7019 #ifdef WM_MSI_MSIX
   7020 /*
   7021  * wm_txintr_msix:
   7022  *
   7023  *	Interrupt service routine for TX complete interrupt for MSI-X.
   7024  */
   7025 static int
   7026 wm_txintr_msix(void *arg)
   7027 {
   7028 	struct wm_softc *sc = arg;
   7029 	struct wm_txqueue *txq = sc->sc_txq;
   7030 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7031 	int handled = 0;
   7032 
   7033 	DPRINTF(WM_DEBUG_TX,
   7034 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7035 
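         	/*
         	 * Mask this queue's interrupt while we service it; it is
         	 * re-enabled via IMS/EIMS below once wm_txeof() has run.
         	 */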
   7036 	if (sc->sc_type == WM_T_82574)
   7037 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ0); /* 82574 only */
   7038 	else if (sc->sc_type == WM_T_82575)
   7039 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE0);
   7040 	else
   7041 		CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_TXINTR_IDX);
   7042 
   7043 	WM_TX_LOCK(txq);
   7044 
   7045 	if (sc->sc_stopping)
   7046 		goto out;
   7047 
   7048 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7049 	handled = wm_txeof(sc);
   7050 
   7051 out:
   7052 	WM_TX_UNLOCK(txq);
   7053 
   7054 	if (sc->sc_type == WM_T_82574)
   7055 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ0); /* 82574 only */
   7056 	else if (sc->sc_type == WM_T_82575)
   7057 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE0);
   7058 	else
   7059 		CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_TXINTR_IDX);
   7060 
   7061 	if (handled) {
   7062 		/* Try to get more packets going. */
   7063 		ifp->if_start(ifp);
   7064 	}
   7065 
   7066 	return handled;
   7067 }
   7068 
   7069 /*
   7070  * wm_rxintr_msix:
   7071  *
   7072  *	Interrupt service routine for RX interrupt for MSI-X.
   7073  */
   7074 static int
   7075 wm_rxintr_msix(void *arg)
   7076 {
   7077 	struct wm_softc *sc = arg;
   7078 	struct wm_rxqueue *rxq = sc->sc_rxq;
   7079 
   7080 	DPRINTF(WM_DEBUG_TX,
   7081 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7082 
   7083 	if (sc->sc_type == WM_T_82574)
   7084 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ0); /* 82574 only */
   7085 	else if (sc->sc_type == WM_T_82575)
   7086 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE0);
   7087 	else
   7088 		CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_RXINTR_IDX);
   7089 
   7090 	WM_RX_LOCK(rxq);
   7091 
   7092 	if (sc->sc_stopping)
   7093 		goto out;
   7094 
   7095 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7096 	wm_rxeof(sc);
   7097 
   7098 out:
   7099 	WM_RX_UNLOCK(rxq);
   7100 
   7101 	if (sc->sc_type == WM_T_82574)
   7102 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ0);
   7103 	else if (sc->sc_type == WM_T_82575)
   7104 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE0);
   7105 	else
   7106 		CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_RXINTR_IDX);
   7107 
   7108 	return 1;
   7109 }
   7110 
   7111 /*
   7112  * wm_linkintr_msix:
   7113  *
   7114  *	Interrupt service routine for link status change for MSI-X.
   7115  */
   7116 static int
   7117 wm_linkintr_msix(void *arg)
   7118 {
   7119 	struct wm_softc *sc = arg;
   7120 	uint32_t reg;
   7121 
   7122 	DPRINTF(WM_DEBUG_TX,
   7123 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7124 
   7125 	reg = CSR_READ(sc, WMREG_ICR);
   7126 	WM_CORE_LOCK(sc);
   7127 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7128 		goto out;
   7129 
   7130 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7131 	wm_linkintr(sc, ICR_LSC);
   7132 
   7133 out:
   7134 	WM_CORE_UNLOCK(sc);
   7135 
   7136 	if (sc->sc_type == WM_T_82574)
   7137 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
   7138 	else if (sc->sc_type == WM_T_82575)
   7139 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7140 	else
   7141 		CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_LINKINTR_IDX);
   7142 
   7143 	return 1;
   7144 }
   7145 #endif /* WM_MSI_MSIX */
   7146 
   7147 /*
   7148  * Media related.
   7149  * GMII, SGMII, TBI (and SERDES)
   7150  */
   7151 
   7152 /* Common */
   7153 
   7154 /*
   7155  * wm_tbi_serdes_set_linkled:
   7156  *
   7157  *	Update the link LED on TBI and SERDES devices.
   7158  */
   7159 static void
   7160 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7161 {
   7162 
   7163 	if (sc->sc_tbi_linkup)
   7164 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7165 	else
   7166 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7167 
   7168 	/* 82540 or newer devices are active low */
   7169 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7170 
   7171 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7172 }
   7173 
   7174 /* GMII related */
   7175 
   7176 /*
   7177  * wm_gmii_reset:
   7178  *
   7179  *	Reset the PHY.
   7180  */
   7181 static void
   7182 wm_gmii_reset(struct wm_softc *sc)
   7183 {
   7184 	uint32_t reg;
   7185 	int rv;
   7186 
   7187 	/* get phy semaphore */
   7188 	switch (sc->sc_type) {
   7189 	case WM_T_82571:
   7190 	case WM_T_82572:
   7191 	case WM_T_82573:
   7192 	case WM_T_82574:
   7193 	case WM_T_82583:
   7194 		 /* XXX should get sw semaphore, too */
   7195 		rv = wm_get_swsm_semaphore(sc);
   7196 		break;
   7197 	case WM_T_82575:
   7198 	case WM_T_82576:
   7199 	case WM_T_82580:
   7200 	case WM_T_I350:
   7201 	case WM_T_I354:
   7202 	case WM_T_I210:
   7203 	case WM_T_I211:
   7204 	case WM_T_80003:
   7205 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7206 		break;
   7207 	case WM_T_ICH8:
   7208 	case WM_T_ICH9:
   7209 	case WM_T_ICH10:
   7210 	case WM_T_PCH:
   7211 	case WM_T_PCH2:
   7212 	case WM_T_PCH_LPT:
   7213 		rv = wm_get_swfwhw_semaphore(sc);
   7214 		break;
   7215 	default:
    7216 		/* nothing to do */
   7217 		rv = 0;
   7218 		break;
   7219 	}
   7220 	if (rv != 0) {
   7221 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7222 		    __func__);
   7223 		return;
   7224 	}
   7225 
   7226 	switch (sc->sc_type) {
   7227 	case WM_T_82542_2_0:
   7228 	case WM_T_82542_2_1:
   7229 		/* null */
   7230 		break;
   7231 	case WM_T_82543:
   7232 		/*
   7233 		 * With 82543, we need to force speed and duplex on the MAC
   7234 		 * equal to what the PHY speed and duplex configuration is.
   7235 		 * In addition, we need to perform a hardware reset on the PHY
   7236 		 * to take it out of reset.
   7237 		 */
   7238 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7239 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7240 
   7241 		/* The PHY reset pin is active-low. */
   7242 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7243 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7244 		    CTRL_EXT_SWDPIN(4));
   7245 		reg |= CTRL_EXT_SWDPIO(4);
   7246 
   7247 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7248 		CSR_WRITE_FLUSH(sc);
   7249 		delay(10*1000);
   7250 
   7251 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7252 		CSR_WRITE_FLUSH(sc);
   7253 		delay(150);
   7254 #if 0
   7255 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7256 #endif
   7257 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7258 		break;
   7259 	case WM_T_82544:	/* reset 10000us */
   7260 	case WM_T_82540:
   7261 	case WM_T_82545:
   7262 	case WM_T_82545_3:
   7263 	case WM_T_82546:
   7264 	case WM_T_82546_3:
   7265 	case WM_T_82541:
   7266 	case WM_T_82541_2:
   7267 	case WM_T_82547:
   7268 	case WM_T_82547_2:
   7269 	case WM_T_82571:	/* reset 100us */
   7270 	case WM_T_82572:
   7271 	case WM_T_82573:
   7272 	case WM_T_82574:
   7273 	case WM_T_82575:
   7274 	case WM_T_82576:
   7275 	case WM_T_82580:
   7276 	case WM_T_I350:
   7277 	case WM_T_I354:
   7278 	case WM_T_I210:
   7279 	case WM_T_I211:
   7280 	case WM_T_82583:
   7281 	case WM_T_80003:
   7282 		/* generic reset */
   7283 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7284 		CSR_WRITE_FLUSH(sc);
   7285 		delay(20000);
   7286 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7287 		CSR_WRITE_FLUSH(sc);
   7288 		delay(20000);
   7289 
   7290 		if ((sc->sc_type == WM_T_82541)
   7291 		    || (sc->sc_type == WM_T_82541_2)
   7292 		    || (sc->sc_type == WM_T_82547)
   7293 		    || (sc->sc_type == WM_T_82547_2)) {
    7294 			/* Workarounds for IGP are done in igp_reset() */
   7295 			/* XXX add code to set LED after phy reset */
   7296 		}
   7297 		break;
   7298 	case WM_T_ICH8:
   7299 	case WM_T_ICH9:
   7300 	case WM_T_ICH10:
   7301 	case WM_T_PCH:
   7302 	case WM_T_PCH2:
   7303 	case WM_T_PCH_LPT:
   7304 		/* generic reset */
   7305 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7306 		CSR_WRITE_FLUSH(sc);
   7307 		delay(100);
   7308 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7309 		CSR_WRITE_FLUSH(sc);
   7310 		delay(150);
   7311 		break;
   7312 	default:
   7313 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7314 		    __func__);
   7315 		break;
   7316 	}
   7317 
   7318 	/* release PHY semaphore */
   7319 	switch (sc->sc_type) {
   7320 	case WM_T_82571:
   7321 	case WM_T_82572:
   7322 	case WM_T_82573:
   7323 	case WM_T_82574:
   7324 	case WM_T_82583:
   7325 		 /* XXX should put sw semaphore, too */
   7326 		wm_put_swsm_semaphore(sc);
   7327 		break;
   7328 	case WM_T_82575:
   7329 	case WM_T_82576:
   7330 	case WM_T_82580:
   7331 	case WM_T_I350:
   7332 	case WM_T_I354:
   7333 	case WM_T_I210:
   7334 	case WM_T_I211:
   7335 	case WM_T_80003:
   7336 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7337 		break;
   7338 	case WM_T_ICH8:
   7339 	case WM_T_ICH9:
   7340 	case WM_T_ICH10:
   7341 	case WM_T_PCH:
   7342 	case WM_T_PCH2:
   7343 	case WM_T_PCH_LPT:
   7344 		wm_put_swfwhw_semaphore(sc);
   7345 		break;
   7346 	default:
    7347 		/* nothing to do */
   7348 		rv = 0;
   7349 		break;
   7350 	}
   7351 
   7352 	/* get_cfg_done */
   7353 	wm_get_cfg_done(sc);
   7354 
   7355 	/* extra setup */
   7356 	switch (sc->sc_type) {
   7357 	case WM_T_82542_2_0:
   7358 	case WM_T_82542_2_1:
   7359 	case WM_T_82543:
   7360 	case WM_T_82544:
   7361 	case WM_T_82540:
   7362 	case WM_T_82545:
   7363 	case WM_T_82545_3:
   7364 	case WM_T_82546:
   7365 	case WM_T_82546_3:
   7366 	case WM_T_82541_2:
   7367 	case WM_T_82547_2:
   7368 	case WM_T_82571:
   7369 	case WM_T_82572:
   7370 	case WM_T_82573:
   7371 	case WM_T_82574:
   7372 	case WM_T_82575:
   7373 	case WM_T_82576:
   7374 	case WM_T_82580:
   7375 	case WM_T_I350:
   7376 	case WM_T_I354:
   7377 	case WM_T_I210:
   7378 	case WM_T_I211:
   7379 	case WM_T_82583:
   7380 	case WM_T_80003:
   7381 		/* null */
   7382 		break;
   7383 	case WM_T_82541:
   7384 	case WM_T_82547:
    7385 		/* XXX Configure the activity LED after PHY reset */
   7386 		break;
   7387 	case WM_T_ICH8:
   7388 	case WM_T_ICH9:
   7389 	case WM_T_ICH10:
   7390 	case WM_T_PCH:
   7391 	case WM_T_PCH2:
   7392 	case WM_T_PCH_LPT:
    7393 		/* Allow time for h/w to get to a quiescent state after reset */
   7394 		delay(10*1000);
   7395 
   7396 		if (sc->sc_type == WM_T_PCH)
   7397 			wm_hv_phy_workaround_ich8lan(sc);
   7398 
   7399 		if (sc->sc_type == WM_T_PCH2)
   7400 			wm_lv_phy_workaround_ich8lan(sc);
   7401 
   7402 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7403 			/*
   7404 			 * dummy read to clear the phy wakeup bit after lcd
   7405 			 * reset
   7406 			 */
   7407 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7408 		}
   7409 
   7410 		/*
    7411 		 * XXX Configure the LCD with the extended configuration region
   7412 		 * in NVM
   7413 		 */
   7414 
   7415 		/* Configure the LCD with the OEM bits in NVM */
   7416 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   7417 		    || (sc->sc_type == WM_T_PCH_LPT)) {
   7418 			/*
   7419 			 * Disable LPLU.
   7420 			 * XXX It seems that 82567 has LPLU, too.
   7421 			 */
   7422 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
    7423 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   7424 			reg |= HV_OEM_BITS_ANEGNOW;
   7425 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   7426 		}
   7427 		break;
   7428 	default:
   7429 		panic("%s: unknown type\n", __func__);
   7430 		break;
   7431 	}
   7432 }
   7433 
   7434 /*
   7435  * wm_get_phy_id_82575:
   7436  *
    7437  * Return the PHY ID, or -1 on failure.
   7438  */
   7439 static int
   7440 wm_get_phy_id_82575(struct wm_softc *sc)
   7441 {
   7442 	uint32_t reg;
   7443 	int phyid = -1;
   7444 
   7445 	/* XXX */
   7446 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   7447 		return -1;
   7448 
   7449 	if (wm_sgmii_uses_mdio(sc)) {
   7450 		switch (sc->sc_type) {
   7451 		case WM_T_82575:
   7452 		case WM_T_82576:
   7453 			reg = CSR_READ(sc, WMREG_MDIC);
   7454 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   7455 			break;
   7456 		case WM_T_82580:
   7457 		case WM_T_I350:
   7458 		case WM_T_I354:
   7459 		case WM_T_I210:
   7460 		case WM_T_I211:
   7461 			reg = CSR_READ(sc, WMREG_MDICNFG);
   7462 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   7463 			break;
   7464 		default:
   7465 			return -1;
   7466 		}
   7467 	}
   7468 
   7469 	return phyid;
   7470 }
   7471 
   7472 
   7473 /*
   7474  * wm_gmii_mediainit:
   7475  *
   7476  *	Initialize media for use on 1000BASE-T devices.
   7477  */
   7478 static void
   7479 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   7480 {
   7481 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7482 	struct mii_data *mii = &sc->sc_mii;
   7483 	uint32_t reg;
   7484 
   7485 	/* We have GMII. */
   7486 	sc->sc_flags |= WM_F_HAS_MII;
   7487 
   7488 	if (sc->sc_type == WM_T_80003)
    7489 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   7490 	else
   7491 		sc->sc_tipg = TIPG_1000T_DFLT;
   7492 
   7493 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   7494 	if ((sc->sc_type == WM_T_82580)
   7495 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   7496 	    || (sc->sc_type == WM_T_I211)) {
   7497 		reg = CSR_READ(sc, WMREG_PHPM);
   7498 		reg &= ~PHPM_GO_LINK_D;
   7499 		CSR_WRITE(sc, WMREG_PHPM, reg);
   7500 	}
   7501 
   7502 	/*
   7503 	 * Let the chip set speed/duplex on its own based on
   7504 	 * signals from the PHY.
   7505 	 * XXXbouyer - I'm not sure this is right for the 80003,
   7506 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   7507 	 */
   7508 	sc->sc_ctrl |= CTRL_SLU;
   7509 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7510 
   7511 	/* Initialize our media structures and probe the GMII. */
   7512 	mii->mii_ifp = ifp;
   7513 
   7514 	/*
   7515 	 * Determine the PHY access method.
   7516 	 *
   7517 	 *  For SGMII, use SGMII specific method.
   7518 	 *
   7519 	 *  For some devices, we can determine the PHY access method
   7520 	 * from sc_type.
   7521 	 *
    7522  *  For ICH and PCH variants, it's difficult to determine the PHY
    7523  * access method from sc_type, so use the PCI product ID for some
    7524  * devices.
    7525  * For other ICH8 variants, try igp's method first; if the PHY
    7526  * can't be detected, fall back to bm's method.
   7527 	 */
   7528 	switch (prodid) {
   7529 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   7530 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   7531 		/* 82577 */
   7532 		sc->sc_phytype = WMPHY_82577;
   7533 		break;
   7534 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   7535 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   7536 		/* 82578 */
   7537 		sc->sc_phytype = WMPHY_82578;
   7538 		break;
   7539 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   7540 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   7541 		/* 82579 */
   7542 		sc->sc_phytype = WMPHY_82579;
   7543 		break;
   7544 	case PCI_PRODUCT_INTEL_82801I_BM:
   7545 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   7546 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   7547 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   7548 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   7549 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   7550 		/* 82567 */
   7551 		sc->sc_phytype = WMPHY_BM;
   7552 		mii->mii_readreg = wm_gmii_bm_readreg;
   7553 		mii->mii_writereg = wm_gmii_bm_writereg;
   7554 		break;
   7555 	default:
   7556 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    7557 		    && !wm_sgmii_uses_mdio(sc)) {
   7558 			/* SGMII */
   7559 			mii->mii_readreg = wm_sgmii_readreg;
   7560 			mii->mii_writereg = wm_sgmii_writereg;
   7561 		} else if (sc->sc_type >= WM_T_80003) {
   7562 			/* 80003 */
   7563 			mii->mii_readreg = wm_gmii_i80003_readreg;
   7564 			mii->mii_writereg = wm_gmii_i80003_writereg;
   7565 		} else if (sc->sc_type >= WM_T_I210) {
   7566 			/* I210 and I211 */
   7567 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   7568 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   7569 		} else if (sc->sc_type >= WM_T_82580) {
   7570 			/* 82580, I350 and I354 */
   7571 			sc->sc_phytype = WMPHY_82580;
   7572 			mii->mii_readreg = wm_gmii_82580_readreg;
   7573 			mii->mii_writereg = wm_gmii_82580_writereg;
   7574 		} else if (sc->sc_type >= WM_T_82544) {
    7575 			/* 82544, 8254[01567], 8257[1234] and 82583 */
   7576 			mii->mii_readreg = wm_gmii_i82544_readreg;
   7577 			mii->mii_writereg = wm_gmii_i82544_writereg;
   7578 		} else {
   7579 			mii->mii_readreg = wm_gmii_i82543_readreg;
   7580 			mii->mii_writereg = wm_gmii_i82543_writereg;
   7581 		}
   7582 		break;
   7583 	}
   7584 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
   7585 		/* All PCH* use _hv_ */
   7586 		mii->mii_readreg = wm_gmii_hv_readreg;
   7587 		mii->mii_writereg = wm_gmii_hv_writereg;
   7588 	}
   7589 	mii->mii_statchg = wm_gmii_statchg;
   7590 
   7591 	wm_gmii_reset(sc);
   7592 
   7593 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   7594 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   7595 	    wm_gmii_mediastatus);
   7596 
   7597 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   7598 	    || (sc->sc_type == WM_T_82580)
   7599 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   7600 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   7601 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   7602 			/* Attach only one port */
   7603 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   7604 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   7605 		} else {
   7606 			int i, id;
   7607 			uint32_t ctrl_ext;
   7608 
   7609 			id = wm_get_phy_id_82575(sc);
   7610 			if (id != -1) {
   7611 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   7612 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   7613 			}
   7614 			if ((id == -1)
   7615 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   7616 				/* Power on sgmii phy if it is disabled */
   7617 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   7618 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    7619 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   7620 				CSR_WRITE_FLUSH(sc);
   7621 				delay(300*1000); /* XXX too long */
   7622 
    7623 				/* Try PHY addresses 1 through 7 */
   7624 				for (i = 1; i < 8; i++)
   7625 					mii_attach(sc->sc_dev, &sc->sc_mii,
   7626 					    0xffffffff, i, MII_OFFSET_ANY,
   7627 					    MIIF_DOPAUSE);
   7628 
    7629 				/* Restore previous SFP cage power state */
   7630 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   7631 			}
   7632 		}
   7633 	} else {
   7634 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   7635 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   7636 	}
   7637 
   7638 	/*
   7639 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   7640 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   7641 	 */
   7642 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   7643 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   7644 		wm_set_mdio_slow_mode_hv(sc);
   7645 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   7646 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   7647 	}
   7648 
   7649 	/*
   7650 	 * (For ICH8 variants)
   7651 	 * If PHY detection failed, use BM's r/w function and retry.
   7652 	 */
   7653 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   7654 		/* if failed, retry with *_bm_* */
   7655 		mii->mii_readreg = wm_gmii_bm_readreg;
   7656 		mii->mii_writereg = wm_gmii_bm_writereg;
   7657 
   7658 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   7659 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   7660 	}
   7661 
   7662 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    7663 		/* No PHY was found */
   7664 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   7665 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   7666 		sc->sc_phytype = WMPHY_NONE;
   7667 	} else {
   7668 		/*
   7669 		 * PHY Found!
   7670 		 * Check PHY type.
   7671 		 */
   7672 		uint32_t model;
   7673 		struct mii_softc *child;
   7674 
   7675 		child = LIST_FIRST(&mii->mii_phys);
   7676 		if (device_is_a(child->mii_dev, "igphy")) {
   7677 			struct igphy_softc *isc = (struct igphy_softc *)child;
   7678 
   7679 			model = isc->sc_mii.mii_mpd_model;
   7680 			if (model == MII_MODEL_yyINTEL_I82566)
   7681 				sc->sc_phytype = WMPHY_IGP_3;
   7682 		}
   7683 
   7684 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   7685 	}
   7686 }
   7687 
   7688 /*
   7689  * wm_gmii_mediachange:	[ifmedia interface function]
   7690  *
   7691  *	Set hardware to newly-selected media on a 1000BASE-T device.
   7692  */
   7693 static int
   7694 wm_gmii_mediachange(struct ifnet *ifp)
   7695 {
   7696 	struct wm_softc *sc = ifp->if_softc;
   7697 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7698 	int rc;
   7699 
   7700 	if ((ifp->if_flags & IFF_UP) == 0)
   7701 		return 0;
   7702 
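         	/*
         	 * On 82544 and newer, or when autoselecting, let the MAC track
         	 * the PHY via auto speed detection; otherwise force the MAC's
         	 * speed and duplex to match the selected media.
         	 */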
   7703 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7704 	sc->sc_ctrl |= CTRL_SLU;
   7705 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   7706 	    || (sc->sc_type > WM_T_82543)) {
   7707 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   7708 	} else {
   7709 		sc->sc_ctrl &= ~CTRL_ASDE;
   7710 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7711 		if (ife->ifm_media & IFM_FDX)
   7712 			sc->sc_ctrl |= CTRL_FD;
   7713 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   7714 		case IFM_10_T:
   7715 			sc->sc_ctrl |= CTRL_SPEED_10;
   7716 			break;
   7717 		case IFM_100_TX:
   7718 			sc->sc_ctrl |= CTRL_SPEED_100;
   7719 			break;
   7720 		case IFM_1000_T:
   7721 			sc->sc_ctrl |= CTRL_SPEED_1000;
   7722 			break;
   7723 		default:
   7724 			panic("wm_gmii_mediachange: bad media 0x%x",
   7725 			    ife->ifm_media);
   7726 		}
   7727 	}
   7728 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7729 	if (sc->sc_type <= WM_T_82543)
   7730 		wm_gmii_reset(sc);
   7731 
   7732 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   7733 		return 0;
   7734 	return rc;
   7735 }
   7736 
   7737 /*
   7738  * wm_gmii_mediastatus:	[ifmedia interface function]
   7739  *
   7740  *	Get the current interface media status on a 1000BASE-T device.
   7741  */
   7742 static void
   7743 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   7744 {
   7745 	struct wm_softc *sc = ifp->if_softc;
   7746 
   7747 	ether_mediastatus(ifp, ifmr);
   7748 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   7749 	    | sc->sc_flowflags;
   7750 }
   7751 
   7752 #define	MDI_IO		CTRL_SWDPIN(2)
   7753 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   7754 #define	MDI_CLK		CTRL_SWDPIN(3)
   7755 
   7756 static void
   7757 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   7758 {
   7759 	uint32_t i, v;
   7760 
   7761 	v = CSR_READ(sc, WMREG_CTRL);
   7762 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   7763 	v |= MDI_DIR | CTRL_SWDPIO(3);
   7764 
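         	/*
         	 * Clock the bits out MSB first: set MDI_IO to the data bit,
         	 * then pulse MDI_CLK high and low with ~10us hold times.
         	 */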
   7765 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   7766 		if (data & i)
   7767 			v |= MDI_IO;
   7768 		else
   7769 			v &= ~MDI_IO;
   7770 		CSR_WRITE(sc, WMREG_CTRL, v);
   7771 		CSR_WRITE_FLUSH(sc);
   7772 		delay(10);
   7773 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7774 		CSR_WRITE_FLUSH(sc);
   7775 		delay(10);
   7776 		CSR_WRITE(sc, WMREG_CTRL, v);
   7777 		CSR_WRITE_FLUSH(sc);
   7778 		delay(10);
   7779 	}
   7780 }
   7781 
   7782 static uint32_t
   7783 wm_i82543_mii_recvbits(struct wm_softc *sc)
   7784 {
   7785 	uint32_t v, i, data = 0;
   7786 
   7787 	v = CSR_READ(sc, WMREG_CTRL);
   7788 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   7789 	v |= CTRL_SWDPIO(3);
   7790 
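         	/*
         	 * MDI_DIR stays clear so the MDIO pin is an input; clock the
         	 * turnaround, then sample 16 data bits after each rising edge
         	 * of MDI_CLK.
         	 */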
   7791 	CSR_WRITE(sc, WMREG_CTRL, v);
   7792 	CSR_WRITE_FLUSH(sc);
   7793 	delay(10);
   7794 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7795 	CSR_WRITE_FLUSH(sc);
   7796 	delay(10);
   7797 	CSR_WRITE(sc, WMREG_CTRL, v);
   7798 	CSR_WRITE_FLUSH(sc);
   7799 	delay(10);
   7800 
   7801 	for (i = 0; i < 16; i++) {
   7802 		data <<= 1;
   7803 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7804 		CSR_WRITE_FLUSH(sc);
   7805 		delay(10);
   7806 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   7807 			data |= 1;
   7808 		CSR_WRITE(sc, WMREG_CTRL, v);
   7809 		CSR_WRITE_FLUSH(sc);
   7810 		delay(10);
   7811 	}
   7812 
   7813 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7814 	CSR_WRITE_FLUSH(sc);
   7815 	delay(10);
   7816 	CSR_WRITE(sc, WMREG_CTRL, v);
   7817 	CSR_WRITE_FLUSH(sc);
   7818 	delay(10);
   7819 
   7820 	return data;
   7821 }
   7822 
   7823 #undef MDI_IO
   7824 #undef MDI_DIR
   7825 #undef MDI_CLK
   7826 
   7827 /*
   7828  * wm_gmii_i82543_readreg:	[mii interface function]
   7829  *
   7830  *	Read a PHY register on the GMII (i82543 version).
   7831  */
   7832 static int
   7833 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   7834 {
   7835 	struct wm_softc *sc = device_private(self);
   7836 	int rv;
   7837 
   7838 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   7839 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   7840 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   7841 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   7842 
   7843 	DPRINTF(WM_DEBUG_GMII,
   7844 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   7845 	    device_xname(sc->sc_dev), phy, reg, rv));
   7846 
   7847 	return rv;
   7848 }
   7849 
   7850 /*
   7851  * wm_gmii_i82543_writereg:	[mii interface function]
   7852  *
   7853  *	Write a PHY register on the GMII (i82543 version).
   7854  */
   7855 static void
   7856 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   7857 {
   7858 	struct wm_softc *sc = device_private(self);
   7859 
   7860 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   7861 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   7862 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   7863 	    (MII_COMMAND_START << 30), 32);
   7864 }
   7865 
   7866 /*
   7867  * wm_gmii_i82544_readreg:	[mii interface function]
   7868  *
   7869  *	Read a PHY register on the GMII.
   7870  */
   7871 static int
   7872 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   7873 {
   7874 	struct wm_softc *sc = device_private(self);
   7875 	uint32_t mdic = 0;
   7876 	int i, rv;
   7877 
   7878 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   7879 	    MDIC_REGADD(reg));
   7880 
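         	/*
         	 * Poll for MDIC_READY, up to WM_GEN_POLL_TIMEOUT * 3 iterations
         	 * at 50us intervals, while the chip runs the MDIO transaction.
         	 */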
   7881 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   7882 		mdic = CSR_READ(sc, WMREG_MDIC);
   7883 		if (mdic & MDIC_READY)
   7884 			break;
   7885 		delay(50);
   7886 	}
   7887 
   7888 	if ((mdic & MDIC_READY) == 0) {
   7889 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   7890 		    device_xname(sc->sc_dev), phy, reg);
   7891 		rv = 0;
   7892 	} else if (mdic & MDIC_E) {
   7893 #if 0 /* This is normal if no PHY is present. */
   7894 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   7895 		    device_xname(sc->sc_dev), phy, reg);
   7896 #endif
   7897 		rv = 0;
   7898 	} else {
   7899 		rv = MDIC_DATA(mdic);
   7900 		if (rv == 0xffff)
   7901 			rv = 0;
   7902 	}
   7903 
   7904 	return rv;
   7905 }
   7906 
   7907 /*
   7908  * wm_gmii_i82544_writereg:	[mii interface function]
   7909  *
   7910  *	Write a PHY register on the GMII.
   7911  */
   7912 static void
   7913 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   7914 {
   7915 	struct wm_softc *sc = device_private(self);
   7916 	uint32_t mdic = 0;
   7917 	int i;
   7918 
   7919 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   7920 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   7921 
   7922 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   7923 		mdic = CSR_READ(sc, WMREG_MDIC);
   7924 		if (mdic & MDIC_READY)
   7925 			break;
   7926 		delay(50);
   7927 	}
   7928 
   7929 	if ((mdic & MDIC_READY) == 0)
   7930 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   7931 		    device_xname(sc->sc_dev), phy, reg);
   7932 	else if (mdic & MDIC_E)
   7933 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   7934 		    device_xname(sc->sc_dev), phy, reg);
   7935 }
   7936 
   7937 /*
   7938  * wm_gmii_i80003_readreg:	[mii interface function]
   7939  *
    7940  *	Read a PHY register on the Kumeran bus.
    7941  * This could be handled by the PHY layer if we didn't have to lock the
    7942  * resource ...
   7943  */
   7944 static int
   7945 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   7946 {
   7947 	struct wm_softc *sc = device_private(self);
   7948 	int sem;
   7949 	int rv;
   7950 
   7951 	if (phy != 1) /* only one PHY on kumeran bus */
   7952 		return 0;
   7953 
   7954 	sem = swfwphysem[sc->sc_funcid];
   7955 	if (wm_get_swfw_semaphore(sc, sem)) {
   7956 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7957 		    __func__);
   7958 		return 0;
   7959 	}
   7960 
   7961 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   7962 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   7963 		    reg >> GG82563_PAGE_SHIFT);
   7964 	} else {
   7965 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   7966 		    reg >> GG82563_PAGE_SHIFT);
   7967 	}
    7968 	/* Wait an extra 200us; the ready bit in the MDIC register is buggy */
   7969 	delay(200);
   7970 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   7971 	delay(200);
   7972 
   7973 	wm_put_swfw_semaphore(sc, sem);
   7974 	return rv;
   7975 }
   7976 
   7977 /*
   7978  * wm_gmii_i80003_writereg:	[mii interface function]
   7979  *
    7980  *	Write a PHY register on the Kumeran bus.
    7981  * This could be handled by the PHY layer if we didn't have to lock the
    7982  * resource ...
   7983  */
   7984 static void
   7985 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   7986 {
   7987 	struct wm_softc *sc = device_private(self);
   7988 	int sem;
   7989 
   7990 	if (phy != 1) /* only one PHY on kumeran bus */
   7991 		return;
   7992 
   7993 	sem = swfwphysem[sc->sc_funcid];
   7994 	if (wm_get_swfw_semaphore(sc, sem)) {
   7995 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7996 		    __func__);
   7997 		return;
   7998 	}
   7999 
   8000 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8001 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8002 		    reg >> GG82563_PAGE_SHIFT);
   8003 	} else {
   8004 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8005 		    reg >> GG82563_PAGE_SHIFT);
   8006 	}
    8007 	/* Wait an extra 200us; the ready bit in the MDIC register is buggy */
   8008 	delay(200);
   8009 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8010 	delay(200);
   8011 
   8012 	wm_put_swfw_semaphore(sc, sem);
   8013 }
   8014 
   8015 /*
   8016  * wm_gmii_bm_readreg:	[mii interface function]
   8017  *
    8018  *	Read a PHY register on the BM PHY.
    8019  * This could be handled by the PHY layer if we didn't have to lock the
    8020  * resource ...
   8021  */
   8022 static int
   8023 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8024 {
   8025 	struct wm_softc *sc = device_private(self);
   8026 	int sem;
   8027 	int rv;
   8028 
   8029 	sem = swfwphysem[sc->sc_funcid];
   8030 	if (wm_get_swfw_semaphore(sc, sem)) {
   8031 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8032 		    __func__);
   8033 		return 0;
   8034 	}
   8035 
   8036 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8037 		if (phy == 1)
   8038 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8039 			    reg);
   8040 		else
   8041 			wm_gmii_i82544_writereg(self, phy,
   8042 			    GG82563_PHY_PAGE_SELECT,
   8043 			    reg >> GG82563_PAGE_SHIFT);
   8044 	}
   8045 
   8046 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8047 	wm_put_swfw_semaphore(sc, sem);
   8048 	return rv;
   8049 }
   8050 
   8051 /*
   8052  * wm_gmii_bm_writereg:	[mii interface function]
   8053  *
    8054  *	Write a PHY register on the BM PHY.
    8055  * This could be handled by the PHY layer if we didn't have to lock the
    8056  * resource ...
   8057  */
   8058 static void
   8059 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8060 {
   8061 	struct wm_softc *sc = device_private(self);
   8062 	int sem;
   8063 
   8064 	sem = swfwphysem[sc->sc_funcid];
   8065 	if (wm_get_swfw_semaphore(sc, sem)) {
   8066 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8067 		    __func__);
   8068 		return;
   8069 	}
   8070 
   8071 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8072 		if (phy == 1)
   8073 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8074 			    reg);
   8075 		else
   8076 			wm_gmii_i82544_writereg(self, phy,
   8077 			    GG82563_PHY_PAGE_SELECT,
   8078 			    reg >> GG82563_PAGE_SHIFT);
   8079 	}
   8080 
   8081 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8082 	wm_put_swfw_semaphore(sc, sem);
   8083 }
   8084 
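         /*
          * wm_access_phy_wakeup_reg_bm:
          *
          *	Read or write a BM PHY wakeup register on page 800 (BM_WUC_PAGE):
          *	enable the wakeup-register window from page 769, issue the
          *	address and data opcodes, then restore the original enable bits.
          *	If rd is nonzero the register is read into *val, otherwise *val
          *	is written.  The caller is expected to hold the semaphore.
          */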
   8085 static void
   8086 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8087 {
   8088 	struct wm_softc *sc = device_private(self);
   8089 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8090 	uint16_t wuce;
   8091 
   8092 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8093 	if (sc->sc_type == WM_T_PCH) {
    8094 		/* XXX The e1000 driver does nothing here... why? */
   8095 	}
   8096 
   8097 	/* Set page 769 */
   8098 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8099 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8100 
   8101 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8102 
   8103 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8104 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8105 	    wuce | BM_WUC_ENABLE_BIT);
   8106 
   8107 	/* Select page 800 */
   8108 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8109 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8110 
   8111 	/* Write page 800 */
   8112 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8113 
   8114 	if (rd)
   8115 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8116 	else
   8117 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8118 
   8119 	/* Set page 769 */
   8120 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8121 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8122 
   8123 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8124 }
   8125 
   8126 /*
   8127  * wm_gmii_hv_readreg:	[mii interface function]
   8128  *
    8129  *	Read a PHY register on an HV (PCH) PHY.
    8130  * This could be handled by the PHY layer if we didn't have to lock the
    8131  * resource ...
   8132  */
   8133 static int
   8134 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8135 {
   8136 	struct wm_softc *sc = device_private(self);
   8137 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8138 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8139 	uint16_t val;
   8140 	int rv;
   8141 
   8142 	if (wm_get_swfwhw_semaphore(sc)) {
   8143 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8144 		    __func__);
   8145 		return 0;
   8146 	}
   8147 
   8148 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8149 	if (sc->sc_phytype == WMPHY_82577) {
   8150 		/* XXX must write */
   8151 	}
   8152 
   8153 	/* Page 800 works differently than the rest so it has its own func */
    8154 	if (page == BM_WUC_PAGE) {
    8155 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
         		wm_put_swfwhw_semaphore(sc);
    8156 		return val;
    8157 	}
   8158 
    8159 	/*
    8160 	 * Pages above 0 but below 768 work differently than the rest and
    8161 	 * are not handled here yet.
    8162 	 */
    8163 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8164 		printf("gmii_hv_readreg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8165 		return 0;
    8166 	}
   8167 
   8168 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8169 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8170 		    page << BME1000_PAGE_SHIFT);
   8171 	}
   8172 
   8173 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8174 	wm_put_swfwhw_semaphore(sc);
   8175 	return rv;
   8176 }
   8177 
   8178 /*
   8179  * wm_gmii_hv_writereg:	[mii interface function]
   8180  *
    8181  *	Write a PHY register on an HV (PCH) PHY.
    8182  * This could be handled by the PHY layer if we didn't have to lock the
    8183  * resource ...
   8184  */
   8185 static void
   8186 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8187 {
   8188 	struct wm_softc *sc = device_private(self);
   8189 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8190 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8191 
   8192 	if (wm_get_swfwhw_semaphore(sc)) {
   8193 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8194 		    __func__);
   8195 		return;
   8196 	}
   8197 
   8198 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8199 
   8200 	/* Page 800 works differently than the rest so it has its own func */
    8201 	if (page == BM_WUC_PAGE) {
    8202 		uint16_t tmp;
    8203 
    8204 		tmp = val;
    8205 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
         		wm_put_swfwhw_semaphore(sc);
    8206 		return;
    8207 	}
   8208 
    8209 	/*
    8210 	 * Pages above 0 but below 768 work differently than the rest and
    8211 	 * are not handled here yet.
    8212 	 */
    8213 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8214 		printf("gmii_hv_writereg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8215 		return;
    8216 	}
   8217 
   8218 	/*
   8219 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8220 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8221 	 */
   8222 
   8223 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8224 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8225 		    page << BME1000_PAGE_SHIFT);
   8226 	}
   8227 
   8228 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8229 	wm_put_swfwhw_semaphore(sc);
   8230 }
   8231 
   8232 /*
   8233  * wm_gmii_82580_readreg:	[mii interface function]
   8234  *
   8235  *	Read a PHY register on the 82580 and I350.
   8236  * This could be handled by the PHY layer if we didn't have to lock the
    8237  * resource ...
   8238  */
   8239 static int
   8240 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8241 {
   8242 	struct wm_softc *sc = device_private(self);
   8243 	int sem;
   8244 	int rv;
   8245 
   8246 	sem = swfwphysem[sc->sc_funcid];
   8247 	if (wm_get_swfw_semaphore(sc, sem)) {
   8248 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8249 		    __func__);
   8250 		return 0;
   8251 	}
   8252 
   8253 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8254 
   8255 	wm_put_swfw_semaphore(sc, sem);
   8256 	return rv;
   8257 }
   8258 
   8259 /*
   8260  * wm_gmii_82580_writereg:	[mii interface function]
   8261  *
   8262  *	Write a PHY register on the 82580 and I350.
   8263  * This could be handled by the PHY layer if we didn't have to lock the
    8264  * resource ...
   8265  */
   8266 static void
   8267 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8268 {
   8269 	struct wm_softc *sc = device_private(self);
   8270 	int sem;
   8271 
   8272 	sem = swfwphysem[sc->sc_funcid];
   8273 	if (wm_get_swfw_semaphore(sc, sem)) {
   8274 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8275 		    __func__);
   8276 		return;
   8277 	}
   8278 
   8279 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8280 
   8281 	wm_put_swfw_semaphore(sc, sem);
   8282 }
   8283 
   8284 /*
   8285  * wm_gmii_gs40g_readreg:	[mii interface function]
   8286  *
    8287  *	Read a PHY register on the I210 and I211.
    8288  * This could be handled by the PHY layer if we didn't have to lock the
    8289  * resource ...
   8290  */
   8291 static int
   8292 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8293 {
   8294 	struct wm_softc *sc = device_private(self);
   8295 	int sem;
   8296 	int page, offset;
   8297 	int rv;
   8298 
   8299 	/* Acquire semaphore */
   8300 	sem = swfwphysem[sc->sc_funcid];
   8301 	if (wm_get_swfw_semaphore(sc, sem)) {
   8302 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8303 		    __func__);
   8304 		return 0;
   8305 	}
   8306 
   8307 	/* Page select */
   8308 	page = reg >> GS40G_PAGE_SHIFT;
   8309 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8310 
   8311 	/* Read reg */
   8312 	offset = reg & GS40G_OFFSET_MASK;
   8313 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8314 
   8315 	wm_put_swfw_semaphore(sc, sem);
   8316 	return rv;
   8317 }
   8318 
   8319 /*
   8320  * wm_gmii_gs40g_writereg:	[mii interface function]
   8321  *
   8322  *	Write a PHY register on the I210 and I211.
   8323  * This could be handled by the PHY layer if we didn't have to lock the
    8324  * resource ...
   8325  */
   8326 static void
   8327 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8328 {
   8329 	struct wm_softc *sc = device_private(self);
   8330 	int sem;
   8331 	int page, offset;
   8332 
   8333 	/* Acquire semaphore */
   8334 	sem = swfwphysem[sc->sc_funcid];
   8335 	if (wm_get_swfw_semaphore(sc, sem)) {
   8336 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8337 		    __func__);
   8338 		return;
   8339 	}
   8340 
   8341 	/* Page select */
   8342 	page = reg >> GS40G_PAGE_SHIFT;
   8343 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8344 
   8345 	/* Write reg */
   8346 	offset = reg & GS40G_OFFSET_MASK;
   8347 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8348 
   8349 	/* Release semaphore */
   8350 	wm_put_swfw_semaphore(sc, sem);
   8351 }
   8352 
   8353 /*
   8354  * wm_gmii_statchg:	[mii interface function]
   8355  *
   8356  *	Callback from MII layer when media changes.
   8357  */
   8358 static void
   8359 wm_gmii_statchg(struct ifnet *ifp)
   8360 {
   8361 	struct wm_softc *sc = ifp->if_softc;
   8362 	struct mii_data *mii = &sc->sc_mii;
   8363 
   8364 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8365 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8366 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8367 
   8368 	/*
   8369 	 * Get flow control negotiation result.
   8370 	 */
   8371 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8372 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8373 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8374 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8375 	}
   8376 
   8377 	if (sc->sc_flowflags & IFM_FLOW) {
   8378 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8379 			sc->sc_ctrl |= CTRL_TFCE;
   8380 			sc->sc_fcrtl |= FCRTL_XONE;
   8381 		}
   8382 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8383 			sc->sc_ctrl |= CTRL_RFCE;
   8384 	}
   8385 
   8386 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8387 		DPRINTF(WM_DEBUG_LINK,
   8388 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8389 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8390 	} else {
   8391 		DPRINTF(WM_DEBUG_LINK,
   8392 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8393 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8394 	}
   8395 
   8396 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8397 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8398 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8399 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8400 	if (sc->sc_type == WM_T_80003) {
   8401 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8402 		case IFM_1000_T:
   8403 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8404 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    8405 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   8406 			break;
   8407 		default:
   8408 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8409 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    8410 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   8411 			break;
   8412 		}
   8413 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8414 	}
   8415 }
   8416 
   8417 /*
   8418  * wm_kmrn_readreg:
   8419  *
    8420  *	Read a Kumeran register
   8421  */
   8422 static int
   8423 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8424 {
   8425 	int rv;
   8426 
   8427 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8428 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8429 			aprint_error_dev(sc->sc_dev,
   8430 			    "%s: failed to get semaphore\n", __func__);
   8431 			return 0;
   8432 		}
   8433 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8434 		if (wm_get_swfwhw_semaphore(sc)) {
   8435 			aprint_error_dev(sc->sc_dev,
   8436 			    "%s: failed to get semaphore\n", __func__);
   8437 			return 0;
   8438 		}
   8439 	}
   8440 
   8441 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8442 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8443 	    KUMCTRLSTA_REN);
   8444 	CSR_WRITE_FLUSH(sc);
   8445 	delay(2);
   8446 
   8447 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   8448 
   8449 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8450 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8451 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8452 		wm_put_swfwhw_semaphore(sc);
   8453 
   8454 	return rv;
   8455 }
   8456 
   8457 /*
   8458  * wm_kmrn_writereg:
   8459  *
    8460  *	Write a Kumeran register
   8461  */
   8462 static void
   8463 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   8464 {
   8465 
   8466 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8467 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8468 			aprint_error_dev(sc->sc_dev,
   8469 			    "%s: failed to get semaphore\n", __func__);
   8470 			return;
   8471 		}
   8472 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8473 		if (wm_get_swfwhw_semaphore(sc)) {
   8474 			aprint_error_dev(sc->sc_dev,
   8475 			    "%s: failed to get semaphore\n", __func__);
   8476 			return;
   8477 		}
   8478 	}
   8479 
   8480 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8481 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8482 	    (val & KUMCTRLSTA_MASK));
   8483 
   8484 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8485 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8486 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8487 		wm_put_swfwhw_semaphore(sc);
   8488 }
   8489 
   8490 /* SGMII related */
   8491 
   8492 /*
   8493  * wm_sgmii_uses_mdio
   8494  *
   8495  * Check whether the transaction is to the internal PHY or the external
   8496  * MDIO interface. Return true if it's MDIO.
   8497  */
   8498 static bool
   8499 wm_sgmii_uses_mdio(struct wm_softc *sc)
   8500 {
   8501 	uint32_t reg;
   8502 	bool ismdio = false;
   8503 
   8504 	switch (sc->sc_type) {
   8505 	case WM_T_82575:
   8506 	case WM_T_82576:
   8507 		reg = CSR_READ(sc, WMREG_MDIC);
   8508 		ismdio = ((reg & MDIC_DEST) != 0);
   8509 		break;
   8510 	case WM_T_82580:
   8511 	case WM_T_I350:
   8512 	case WM_T_I354:
   8513 	case WM_T_I210:
   8514 	case WM_T_I211:
   8515 		reg = CSR_READ(sc, WMREG_MDICNFG);
   8516 		ismdio = ((reg & MDICNFG_DEST) != 0);
   8517 		break;
   8518 	default:
   8519 		break;
   8520 	}
   8521 
   8522 	return ismdio;
   8523 }
   8524 
   8525 /*
   8526  * wm_sgmii_readreg:	[mii interface function]
   8527  *
    8528  *	Read a PHY register on the SGMII.
    8529  * This could be handled by the PHY layer if we didn't have to lock the
    8530  * resource ...
   8531  */
   8532 static int
   8533 wm_sgmii_readreg(device_t self, int phy, int reg)
   8534 {
   8535 	struct wm_softc *sc = device_private(self);
   8536 	uint32_t i2ccmd;
   8537 	int i, rv;
   8538 
   8539 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8540 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8541 		    __func__);
   8542 		return 0;
   8543 	}
   8544 
   8545 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8546 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8547 	    | I2CCMD_OPCODE_READ;
   8548 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8549 
   8550 	/* Poll the ready bit */
   8551 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8552 		delay(50);
   8553 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8554 		if (i2ccmd & I2CCMD_READY)
   8555 			break;
   8556 	}
   8557 	if ((i2ccmd & I2CCMD_READY) == 0)
   8558 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   8559 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   8560 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   8561 
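          	/* Swap the data bytes back from I2C byte order, as on the write side */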
   8562 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   8563 
   8564 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   8565 	return rv;
   8566 }
   8567 
   8568 /*
   8569  * wm_sgmii_writereg:	[mii interface function]
   8570  *
   8571  *	Write a PHY register on the SGMII.
   8572  * This could be handled by the PHY layer if we didn't have to lock the
    8573  * resource ...
   8574  */
   8575 static void
   8576 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   8577 {
   8578 	struct wm_softc *sc = device_private(self);
   8579 	uint32_t i2ccmd;
   8580 	int i;
   8581 	int val_swapped;
   8582 
   8583 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8584 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8585 		    __func__);
   8586 		return;
   8587 	}
   8588 	/* Swap the data bytes for the I2C interface */
   8589 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   8590 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8591 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8592 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   8593 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8594 
   8595 	/* Poll the ready bit */
   8596 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8597 		delay(50);
   8598 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8599 		if (i2ccmd & I2CCMD_READY)
   8600 			break;
   8601 	}
   8602 	if ((i2ccmd & I2CCMD_READY) == 0)
   8603 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   8604 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   8605 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   8606 
    8607 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   8608 }
   8609 
   8610 /* TBI related */
   8611 
   8612 /*
   8613  * wm_tbi_mediainit:
   8614  *
   8615  *	Initialize media for use on 1000BASE-X devices.
   8616  */
   8617 static void
   8618 wm_tbi_mediainit(struct wm_softc *sc)
   8619 {
   8620 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8621 	const char *sep = "";
   8622 
   8623 	if (sc->sc_type < WM_T_82543)
   8624 		sc->sc_tipg = TIPG_WM_DFLT;
   8625 	else
   8626 		sc->sc_tipg = TIPG_LG_DFLT;
   8627 
   8628 	sc->sc_tbi_serdes_anegticks = 5;
   8629 
   8630 	/* Initialize our media structures */
   8631 	sc->sc_mii.mii_ifp = ifp;
   8632 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8633 
   8634 	if ((sc->sc_type >= WM_T_82575)
   8635 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   8636 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   8637 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   8638 	else
   8639 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   8640 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   8641 
   8642 	/*
   8643 	 * SWD Pins:
   8644 	 *
   8645 	 *	0 = Link LED (output)
   8646 	 *	1 = Loss Of Signal (input)
   8647 	 */
   8648 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   8649 
   8650 	/* XXX Perhaps this is only for TBI */
   8651 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   8652 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   8653 
   8654 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8655 		sc->sc_ctrl &= ~CTRL_LRST;
   8656 
   8657 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8658 
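         /*
          * Add one supported media type to the ifmedia list and print its name;
          * "sep" makes the printed names comma-separated.
          */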
   8659 #define	ADD(ss, mm, dd)							\
   8660 do {									\
   8661 	aprint_normal("%s%s", sep, ss);					\
   8662 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   8663 	sep = ", ";							\
   8664 } while (/*CONSTCOND*/0)
   8665 
   8666 	aprint_normal_dev(sc->sc_dev, "");
   8667 
   8668 	/* Only 82545 is LX */
   8669 	if (sc->sc_type == WM_T_82545) {
   8670 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   8671 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
   8672 	} else {
   8673 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   8674 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   8675 	}
   8676 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   8677 	aprint_normal("\n");
   8678 
   8679 #undef ADD
   8680 
   8681 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   8682 }
   8683 
   8684 /*
   8685  * wm_tbi_mediachange:	[ifmedia interface function]
   8686  *
   8687  *	Set hardware to newly-selected media on a 1000BASE-X device.
   8688  */
   8689 static int
   8690 wm_tbi_mediachange(struct ifnet *ifp)
   8691 {
   8692 	struct wm_softc *sc = ifp->if_softc;
   8693 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8694 	uint32_t status;
   8695 	int i;
   8696 
   8697 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   8698 		/* XXX need some work for >= 82571 and < 82575 */
   8699 		if (sc->sc_type < WM_T_82575)
   8700 			return 0;
   8701 	}
   8702 
   8703 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   8704 	    || (sc->sc_type >= WM_T_82575))
   8705 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   8706 
   8707 	sc->sc_ctrl &= ~CTRL_LRST;
   8708 	sc->sc_txcw = TXCW_ANE;
   8709 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8710 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   8711 	else if (ife->ifm_media & IFM_FDX)
   8712 		sc->sc_txcw |= TXCW_FD;
   8713 	else
   8714 		sc->sc_txcw |= TXCW_HD;
   8715 
   8716 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   8717 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   8718 
   8719 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   8720 		    device_xname(sc->sc_dev), sc->sc_txcw));
   8721 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   8722 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8723 	CSR_WRITE_FLUSH(sc);
   8724 	delay(1000);
   8725 
   8726 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   8727 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   8728 
    8729 	/*
    8730 	 * On chips newer than the 82544, CTRL_SWDPIN(1) will be set if the
    8731 	 * optics detect a signal; on the 82544 and earlier, 0 means signal.
    8732 	 */
   8733 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   8734 		/* Have signal; wait for the link to come up. */
   8735 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   8736 			delay(10000);
   8737 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   8738 				break;
   8739 		}
   8740 
   8741 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   8742 			    device_xname(sc->sc_dev),i));
   8743 
   8744 		status = CSR_READ(sc, WMREG_STATUS);
   8745 		DPRINTF(WM_DEBUG_LINK,
   8746 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   8747 			device_xname(sc->sc_dev),status, STATUS_LU));
   8748 		if (status & STATUS_LU) {
   8749 			/* Link is up. */
   8750 			DPRINTF(WM_DEBUG_LINK,
   8751 			    ("%s: LINK: set media -> link up %s\n",
   8752 			    device_xname(sc->sc_dev),
   8753 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8754 
   8755 			/*
    8756 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    8757 			 * automatically, so we should re-read sc->sc_ctrl.
   8758 			 */
   8759 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8760 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8761 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8762 			if (status & STATUS_FD)
   8763 				sc->sc_tctl |=
   8764 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8765 			else
   8766 				sc->sc_tctl |=
   8767 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8768 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   8769 				sc->sc_fcrtl |= FCRTL_XONE;
   8770 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8771 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8772 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8773 				      sc->sc_fcrtl);
   8774 			sc->sc_tbi_linkup = 1;
   8775 		} else {
   8776 			if (i == WM_LINKUP_TIMEOUT)
   8777 				wm_check_for_link(sc);
   8778 			/* Link is down. */
   8779 			DPRINTF(WM_DEBUG_LINK,
   8780 			    ("%s: LINK: set media -> link down\n",
   8781 			    device_xname(sc->sc_dev)));
   8782 			sc->sc_tbi_linkup = 0;
   8783 		}
   8784 	} else {
   8785 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   8786 		    device_xname(sc->sc_dev)));
   8787 		sc->sc_tbi_linkup = 0;
   8788 	}
   8789 
   8790 	wm_tbi_serdes_set_linkled(sc);
   8791 
   8792 	return 0;
   8793 }
   8794 
   8795 /*
   8796  * wm_tbi_mediastatus:	[ifmedia interface function]
   8797  *
   8798  *	Get the current interface media status on a 1000BASE-X device.
   8799  */
   8800 static void
   8801 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8802 {
   8803 	struct wm_softc *sc = ifp->if_softc;
   8804 	uint32_t ctrl, status;
   8805 
   8806 	ifmr->ifm_status = IFM_AVALID;
   8807 	ifmr->ifm_active = IFM_ETHER;
   8808 
   8809 	status = CSR_READ(sc, WMREG_STATUS);
   8810 	if ((status & STATUS_LU) == 0) {
   8811 		ifmr->ifm_active |= IFM_NONE;
   8812 		return;
   8813 	}
   8814 
   8815 	ifmr->ifm_status |= IFM_ACTIVE;
   8816 	/* Only 82545 is LX */
   8817 	if (sc->sc_type == WM_T_82545)
   8818 		ifmr->ifm_active |= IFM_1000_LX;
   8819 	else
   8820 		ifmr->ifm_active |= IFM_1000_SX;
   8821 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   8822 		ifmr->ifm_active |= IFM_FDX;
   8823 	else
   8824 		ifmr->ifm_active |= IFM_HDX;
   8825 	ctrl = CSR_READ(sc, WMREG_CTRL);
   8826 	if (ctrl & CTRL_RFCE)
   8827 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   8828 	if (ctrl & CTRL_TFCE)
   8829 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   8830 }
   8831 
    8832 /*
          * wm_check_for_link:
          *
          *	Check the link state on TBI devices and, if necessary, force
          *	link up or restart autonegotiation.  XXX TBI only.
          */
   8833 static int
   8834 wm_check_for_link(struct wm_softc *sc)
   8835 {
   8836 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8837 	uint32_t rxcw;
   8838 	uint32_t ctrl;
   8839 	uint32_t status;
   8840 	uint32_t sig;
   8841 
   8842 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   8843 		/* XXX need some work for >= 82571 */
   8844 		if (sc->sc_type >= WM_T_82571) {
   8845 			sc->sc_tbi_linkup = 1;
   8846 			return 0;
   8847 		}
   8848 	}
   8849 
   8850 	rxcw = CSR_READ(sc, WMREG_RXCW);
   8851 	ctrl = CSR_READ(sc, WMREG_CTRL);
   8852 	status = CSR_READ(sc, WMREG_STATUS);
   8853 
   8854 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   8855 
   8856 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   8857 		device_xname(sc->sc_dev), __func__,
   8858 		((ctrl & CTRL_SWDPIN(1)) == sig),
   8859 		((status & STATUS_LU) != 0),
   8860 		((rxcw & RXCW_C) != 0)
   8861 		    ));
   8862 
   8863 	/*
   8864 	 * SWDPIN   LU RXCW
   8865 	 *      0    0    0
   8866 	 *      0    0    1	(should not happen)
   8867 	 *      0    1    0	(should not happen)
   8868 	 *      0    1    1	(should not happen)
    8869 	 *      1    0    0	Disable autonegotiation and force link up
    8870 	 *      1    0    1	got /C/ but no link yet
    8871 	 *      1    1    0	(link up)
    8872 	 *      1    1    1	If IFM_AUTO, go back to autonegotiation
   8874 	 */
   8875 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   8876 	    && ((status & STATUS_LU) == 0)
   8877 	    && ((rxcw & RXCW_C) == 0)) {
   8878 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   8879 			__func__));
   8880 		sc->sc_tbi_linkup = 0;
   8881 		/* Disable auto-negotiation in the TXCW register */
   8882 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   8883 
   8884 		/*
   8885 		 * Force link-up and also force full-duplex.
   8886 		 *
    8887 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    8888 		 * automatically, so we should re-read sc->sc_ctrl.
   8889 		 */
   8890 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   8891 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8892 	} else if (((status & STATUS_LU) != 0)
   8893 	    && ((rxcw & RXCW_C) != 0)
   8894 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   8895 		sc->sc_tbi_linkup = 1;
   8896 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   8897 			__func__));
   8898 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   8899 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   8900 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   8901 	    && ((rxcw & RXCW_C) != 0)) {
   8902 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   8903 	} else {
   8904 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   8905 			status));
   8906 	}
   8907 
   8908 	return 0;
   8909 }
   8910 
   8911 /*
   8912  * wm_tbi_tick:
   8913  *
   8914  *	Check the link on TBI devices.
   8915  *	This function acts as mii_tick().
   8916  */
   8917 static void
   8918 wm_tbi_tick(struct wm_softc *sc)
   8919 {
   8920 	struct wm_txqueue *txq = sc->sc_txq;
   8921 	struct mii_data *mii = &sc->sc_mii;
   8922 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   8923 	uint32_t status;
   8924 
   8925 	KASSERT(WM_TX_LOCKED(txq));
   8926 
   8927 	status = CSR_READ(sc, WMREG_STATUS);
   8928 
   8929 	/* XXX is this needed? */
   8930 	(void)CSR_READ(sc, WMREG_RXCW);
   8931 	(void)CSR_READ(sc, WMREG_CTRL);
   8932 
   8933 	/* set link status */
   8934 	if ((status & STATUS_LU) == 0) {
   8935 		DPRINTF(WM_DEBUG_LINK,
   8936 		    ("%s: LINK: checklink -> down\n",
   8937 			device_xname(sc->sc_dev)));
   8938 		sc->sc_tbi_linkup = 0;
   8939 	} else if (sc->sc_tbi_linkup == 0) {
   8940 		DPRINTF(WM_DEBUG_LINK,
   8941 		    ("%s: LINK: checklink -> up %s\n",
   8942 			device_xname(sc->sc_dev),
   8943 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8944 		sc->sc_tbi_linkup = 1;
   8945 		sc->sc_tbi_serdes_ticks = 0;
   8946 	}
   8947 
   8948 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   8949 		goto setled;
   8950 
   8951 	if ((status & STATUS_LU) == 0) {
   8952 		sc->sc_tbi_linkup = 0;
   8953 		/* If the timer expired, retry autonegotiation */
   8954 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8955 		    && (++sc->sc_tbi_serdes_ticks
   8956 			>= sc->sc_tbi_serdes_anegticks)) {
   8957 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   8958 			sc->sc_tbi_serdes_ticks = 0;
   8959 			/*
   8960 			 * Reset the link, and let autonegotiation do
   8961 			 * its thing
   8962 			 */
   8963 			sc->sc_ctrl |= CTRL_LRST;
   8964 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8965 			CSR_WRITE_FLUSH(sc);
   8966 			delay(1000);
   8967 			sc->sc_ctrl &= ~CTRL_LRST;
   8968 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8969 			CSR_WRITE_FLUSH(sc);
   8970 			delay(1000);
   8971 			CSR_WRITE(sc, WMREG_TXCW,
   8972 			    sc->sc_txcw & ~TXCW_ANE);
   8973 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   8974 		}
   8975 	}
   8976 
   8977 setled:
   8978 	wm_tbi_serdes_set_linkled(sc);
   8979 }
   8980 
   8981 /* SERDES related */
   8982 static void
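         /*
          * wm_serdes_power_up_link_82575:
          *
          *	Power up the SERDES or SGMII link on 82575 and newer devices by
          *	enabling the PCS and deasserting SDP3.
          */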
   8983 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   8984 {
   8985 	uint32_t reg;
   8986 
   8987 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   8988 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   8989 		return;
   8990 
   8991 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   8992 	reg |= PCS_CFG_PCS_EN;
   8993 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   8994 
   8995 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8996 	reg &= ~CTRL_EXT_SWDPIN(3);
   8997 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8998 	CSR_WRITE_FLUSH(sc);
   8999 }
   9000 
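         /*
          * wm_serdes_mediachange:	[ifmedia interface function]
          *
          *	Set hardware to newly-selected media on a SERDES device: force
          *	the link up and either enable PCS autonegotiation or force
          *	1000 Mb/s full-duplex, depending on the link mode.
          */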
   9001 static int
   9002 wm_serdes_mediachange(struct ifnet *ifp)
   9003 {
   9004 	struct wm_softc *sc = ifp->if_softc;
   9005 	bool pcs_autoneg = true; /* XXX */
   9006 	uint32_t ctrl_ext, pcs_lctl, reg;
   9007 
   9008 	/* XXX Currently, this function is not called on 8257[12] */
   9009 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9010 	    || (sc->sc_type >= WM_T_82575))
   9011 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9012 
   9013 	wm_serdes_power_up_link_82575(sc);
   9014 
   9015 	sc->sc_ctrl |= CTRL_SLU;
   9016 
   9017 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9018 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9019 
   9020 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9021 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9022 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9023 	case CTRL_EXT_LINK_MODE_SGMII:
   9024 		pcs_autoneg = true;
   9025 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9026 		break;
   9027 	case CTRL_EXT_LINK_MODE_1000KX:
   9028 		pcs_autoneg = false;
   9029 		/* FALLTHROUGH */
   9030 	default:
   9031 		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
   9032 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9033 				pcs_autoneg = false;
   9034 		}
   9035 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9036 		    | CTRL_FRCFDX;
   9037 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9038 	}
   9039 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9040 
   9041 	if (pcs_autoneg) {
   9042 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9043 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9044 
   9045 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9046 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9047 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9048 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9049 	} else
   9050 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9051 
   9052 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    9053 
   9055 	return 0;
   9056 }
   9057 
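         /*
          * wm_serdes_mediastatus:	[ifmedia interface function]
          *
          *	Get the current interface media status on a SERDES device from
          *	the PCS link status, including the negotiated flow-control
          *	pause bits.
          */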
   9058 static void
   9059 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9060 {
   9061 	struct wm_softc *sc = ifp->if_softc;
   9062 	struct mii_data *mii = &sc->sc_mii;
   9063 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9064 	uint32_t pcs_adv, pcs_lpab, reg;
   9065 
   9066 	ifmr->ifm_status = IFM_AVALID;
   9067 	ifmr->ifm_active = IFM_ETHER;
   9068 
   9069 	/* Check PCS */
   9070 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9071 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9072 		ifmr->ifm_active |= IFM_NONE;
   9073 		sc->sc_tbi_linkup = 0;
   9074 		goto setled;
   9075 	}
   9076 
   9077 	sc->sc_tbi_linkup = 1;
   9078 	ifmr->ifm_status |= IFM_ACTIVE;
   9079 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9080 	if ((reg & PCS_LSTS_FDX) != 0)
   9081 		ifmr->ifm_active |= IFM_FDX;
   9082 	else
   9083 		ifmr->ifm_active |= IFM_HDX;
   9084 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9085 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9086 		/* Check flow */
   9087 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9088 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9089 			printf("XXX LINKOK but not ACOMP\n");
   9090 			goto setled;
   9091 		}
   9092 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9093 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
    9094 		printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
   9095 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9096 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9097 			mii->mii_media_active |= IFM_FLOW
   9098 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9099 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9100 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9101 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9102 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9103 			mii->mii_media_active |= IFM_FLOW
   9104 			    | IFM_ETH_TXPAUSE;
   9105 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9106 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9107 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9108 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9109 			mii->mii_media_active |= IFM_FLOW
   9110 			    | IFM_ETH_RXPAUSE;
    9111 		}
   9113 	}
   9114 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9115 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9116 setled:
   9117 	wm_tbi_serdes_set_linkled(sc);
   9118 }
   9119 
   9120 /*
   9121  * wm_serdes_tick:
   9122  *
   9123  *	Check the link on serdes devices.
   9124  */
   9125 static void
   9126 wm_serdes_tick(struct wm_softc *sc)
   9127 {
   9128 	struct wm_txqueue *txq = sc->sc_txq;
   9129 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9130 	struct mii_data *mii = &sc->sc_mii;
   9131 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9132 	uint32_t reg;
   9133 
   9134 	KASSERT(WM_TX_LOCKED(txq));
   9135 
   9136 	mii->mii_media_status = IFM_AVALID;
   9137 	mii->mii_media_active = IFM_ETHER;
   9138 
   9139 	/* Check PCS */
   9140 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9141 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9142 		mii->mii_media_status |= IFM_ACTIVE;
   9143 		sc->sc_tbi_linkup = 1;
   9144 		sc->sc_tbi_serdes_ticks = 0;
   9145 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9146 		if ((reg & PCS_LSTS_FDX) != 0)
   9147 			mii->mii_media_active |= IFM_FDX;
   9148 		else
   9149 			mii->mii_media_active |= IFM_HDX;
   9150 	} else {
   9151 		mii->mii_media_status |= IFM_NONE;
   9152 		sc->sc_tbi_linkup = 0;
    9153 		/* If the timer expired, retry autonegotiation */
   9154 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9155 		    && (++sc->sc_tbi_serdes_ticks
   9156 			>= sc->sc_tbi_serdes_anegticks)) {
   9157 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9158 			sc->sc_tbi_serdes_ticks = 0;
   9159 			/* XXX */
   9160 			wm_serdes_mediachange(ifp);
   9161 		}
   9162 	}
   9163 
   9164 	wm_tbi_serdes_set_linkled(sc);
   9165 }
   9166 
   9167 /* SFP related */
   9168 
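         /*
          * wm_sfp_read_data_byte:
          *
          *	Read one byte from the SFP module's EEPROM through the I2C
          *	interface.  Returns 0 on success, -1 on timeout or I2C error.
          */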
   9169 static int
   9170 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9171 {
   9172 	uint32_t i2ccmd;
   9173 	int i;
   9174 
   9175 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9176 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9177 
   9178 	/* Poll the ready bit */
   9179 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9180 		delay(50);
   9181 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9182 		if (i2ccmd & I2CCMD_READY)
   9183 			break;
   9184 	}
   9185 	if ((i2ccmd & I2CCMD_READY) == 0)
   9186 		return -1;
   9187 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9188 		return -1;
   9189 
   9190 	*data = i2ccmd & 0x00ff;
   9191 
   9192 	return 0;
   9193 }
   9194 
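         /*
          * wm_sfp_get_media_type:
          *
          *	Identify the media type (SERDES or copper/SGMII) from the SFP
          *	module's identifier and Ethernet compliance code bytes; returns
          *	WM_MEDIATYPE_UNKNOWN if the module can't be identified.
          */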
   9195 static uint32_t
   9196 wm_sfp_get_media_type(struct wm_softc *sc)
   9197 {
   9198 	uint32_t ctrl_ext;
   9199 	uint8_t val = 0;
   9200 	int timeout = 3;
   9201 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9202 	int rv = -1;
   9203 
   9204 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9205 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9206 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9207 	CSR_WRITE_FLUSH(sc);
   9208 
   9209 	/* Read SFP module data */
   9210 	while (timeout) {
   9211 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9212 		if (rv == 0)
   9213 			break;
   9214 		delay(100*1000); /* XXX too big */
   9215 		timeout--;
   9216 	}
   9217 	if (rv != 0)
   9218 		goto out;
   9219 	switch (val) {
   9220 	case SFF_SFP_ID_SFF:
   9221 		aprint_normal_dev(sc->sc_dev,
   9222 		    "Module/Connector soldered to board\n");
   9223 		break;
   9224 	case SFF_SFP_ID_SFP:
   9225 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9226 		break;
   9227 	case SFF_SFP_ID_UNKNOWN:
   9228 		goto out;
   9229 	default:
   9230 		break;
   9231 	}
   9232 
   9233 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9234 	if (rv != 0) {
   9235 		goto out;
   9236 	}
   9237 
   9238 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9239 		mediatype = WM_MEDIATYPE_SERDES;
    9240 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
    9241 		sc->sc_flags |= WM_F_SGMII;
    9242 		mediatype = WM_MEDIATYPE_COPPER;
    9243 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   9244 		sc->sc_flags |= WM_F_SGMII;
   9245 		mediatype = WM_MEDIATYPE_SERDES;
   9246 	}
   9247 
   9248 out:
   9249 	/* Restore I2C interface setting */
   9250 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9251 
   9252 	return mediatype;
   9253 }

    9254 /*
   9255  * NVM related.
   9256  * Microwire, SPI (w/wo EERD) and Flash.
   9257  */
   9258 
   9259 /* Both spi and uwire */
   9260 
   9261 /*
   9262  * wm_eeprom_sendbits:
   9263  *
   9264  *	Send a series of bits to the EEPROM.
   9265  */
   9266 static void
   9267 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9268 {
   9269 	uint32_t reg;
   9270 	int x;
   9271 
   9272 	reg = CSR_READ(sc, WMREG_EECD);
   9273 
   9274 	for (x = nbits; x > 0; x--) {
   9275 		if (bits & (1U << (x - 1)))
   9276 			reg |= EECD_DI;
   9277 		else
   9278 			reg &= ~EECD_DI;
   9279 		CSR_WRITE(sc, WMREG_EECD, reg);
   9280 		CSR_WRITE_FLUSH(sc);
   9281 		delay(2);
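          		/* Pulse SK to clock the bit into the EEPROM */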
   9282 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9283 		CSR_WRITE_FLUSH(sc);
   9284 		delay(2);
   9285 		CSR_WRITE(sc, WMREG_EECD, reg);
   9286 		CSR_WRITE_FLUSH(sc);
   9287 		delay(2);
   9288 	}
   9289 }
   9290 
   9291 /*
   9292  * wm_eeprom_recvbits:
   9293  *
   9294  *	Receive a series of bits from the EEPROM.
   9295  */
   9296 static void
   9297 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9298 {
   9299 	uint32_t reg, val;
   9300 	int x;
   9301 
   9302 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9303 
   9304 	val = 0;
   9305 	for (x = nbits; x > 0; x--) {
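          		/* Raise SK; the EEPROM shifts the next bit onto DO */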
   9306 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9307 		CSR_WRITE_FLUSH(sc);
   9308 		delay(2);
   9309 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9310 			val |= (1U << (x - 1));
   9311 		CSR_WRITE(sc, WMREG_EECD, reg);
   9312 		CSR_WRITE_FLUSH(sc);
   9313 		delay(2);
   9314 	}
   9315 	*valp = val;
   9316 }
   9317 
   9318 /* Microwire */
   9319 
   9320 /*
   9321  * wm_nvm_read_uwire:
   9322  *
   9323  *	Read a word from the EEPROM using the MicroWire protocol.
   9324  */
   9325 static int
   9326 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9327 {
   9328 	uint32_t reg, val;
   9329 	int i;
   9330 
   9331 	for (i = 0; i < wordcnt; i++) {
   9332 		/* Clear SK and DI. */
   9333 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9334 		CSR_WRITE(sc, WMREG_EECD, reg);
   9335 
   9336 		/*
   9337 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9338 		 * and Xen.
   9339 		 *
   9340 		 * We use this workaround only for 82540 because qemu's
    9341 		 * e1000 acts as an 82540.
   9342 		 */
   9343 		if (sc->sc_type == WM_T_82540) {
   9344 			reg |= EECD_SK;
   9345 			CSR_WRITE(sc, WMREG_EECD, reg);
   9346 			reg &= ~EECD_SK;
   9347 			CSR_WRITE(sc, WMREG_EECD, reg);
   9348 			CSR_WRITE_FLUSH(sc);
   9349 			delay(2);
   9350 		}
   9351 		/* XXX: end of workaround */
   9352 
   9353 		/* Set CHIP SELECT. */
   9354 		reg |= EECD_CS;
   9355 		CSR_WRITE(sc, WMREG_EECD, reg);
   9356 		CSR_WRITE_FLUSH(sc);
   9357 		delay(2);
   9358 
   9359 		/* Shift in the READ command. */
   9360 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9361 
   9362 		/* Shift in address. */
   9363 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9364 
   9365 		/* Shift out the data. */
   9366 		wm_eeprom_recvbits(sc, &val, 16);
   9367 		data[i] = val & 0xffff;
   9368 
   9369 		/* Clear CHIP SELECT. */
   9370 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9371 		CSR_WRITE(sc, WMREG_EECD, reg);
   9372 		CSR_WRITE_FLUSH(sc);
   9373 		delay(2);
   9374 	}
   9375 
   9376 	return 0;
   9377 }
   9378 
   9379 /* SPI */
   9380 
   9381 /*
   9382  * Set SPI and FLASH related information from the EECD register.
   9383  * For 82541 and 82547, the word size is taken from EEPROM.
   9384  */
   9385 static int
   9386 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9387 {
   9388 	int size;
   9389 	uint32_t reg;
   9390 	uint16_t data;
   9391 
   9392 	reg = CSR_READ(sc, WMREG_EECD);
   9393 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9394 
   9395 	/* Read the size of NVM from EECD by default */
   9396 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9397 	switch (sc->sc_type) {
   9398 	case WM_T_82541:
   9399 	case WM_T_82541_2:
   9400 	case WM_T_82547:
   9401 	case WM_T_82547_2:
    9402 		/* Set a temporary word size so wm_nvm_read() below works */
   9403 		sc->sc_nvm_wordsize = 64;
   9404 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9405 		reg = data;
   9406 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9407 		if (size == 0)
   9408 			size = 6; /* 64 word size */
   9409 		else
   9410 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9411 		break;
   9412 	case WM_T_80003:
   9413 	case WM_T_82571:
   9414 	case WM_T_82572:
   9415 	case WM_T_82573: /* SPI case */
   9416 	case WM_T_82574: /* SPI case */
   9417 	case WM_T_82583: /* SPI case */
   9418 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9419 		if (size > 14)
   9420 			size = 14;
   9421 		break;
   9422 	case WM_T_82575:
   9423 	case WM_T_82576:
   9424 	case WM_T_82580:
   9425 	case WM_T_I350:
   9426 	case WM_T_I354:
   9427 	case WM_T_I210:
   9428 	case WM_T_I211:
   9429 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9430 		if (size > 15)
   9431 			size = 15;
   9432 		break;
   9433 	default:
   9434 		aprint_error_dev(sc->sc_dev,
   9435 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    9436 		return -1;
   9438 	}
   9439 
   9440 	sc->sc_nvm_wordsize = 1 << size;
   9441 
   9442 	return 0;
   9443 }
   9444 
   9445 /*
   9446  * wm_nvm_ready_spi:
   9447  *
   9448  *	Wait for a SPI EEPROM to be ready for commands.
   9449  */
   9450 static int
   9451 wm_nvm_ready_spi(struct wm_softc *sc)
   9452 {
   9453 	uint32_t val;
   9454 	int usec;
   9455 
   9456 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   9457 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   9458 		wm_eeprom_recvbits(sc, &val, 8);
   9459 		if ((val & SPI_SR_RDY) == 0)
   9460 			break;
   9461 	}
   9462 	if (usec >= SPI_MAX_RETRIES) {
   9463 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   9464 		return 1;
   9465 	}
   9466 	return 0;
   9467 }
   9468 
   9469 /*
   9470  * wm_nvm_read_spi:
   9471  *
    9472  *	Read a word from the EEPROM using the SPI protocol.
   9473  */
   9474 static int
   9475 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9476 {
   9477 	uint32_t reg, val;
   9478 	int i;
   9479 	uint8_t opc;
   9480 
   9481 	/* Clear SK and CS. */
   9482 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   9483 	CSR_WRITE(sc, WMREG_EECD, reg);
   9484 	CSR_WRITE_FLUSH(sc);
   9485 	delay(2);
   9486 
   9487 	if (wm_nvm_ready_spi(sc))
   9488 		return 1;
   9489 
   9490 	/* Toggle CS to flush commands. */
   9491 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   9492 	CSR_WRITE_FLUSH(sc);
   9493 	delay(2);
   9494 	CSR_WRITE(sc, WMREG_EECD, reg);
   9495 	CSR_WRITE_FLUSH(sc);
   9496 	delay(2);
   9497 
   9498 	opc = SPI_OPC_READ;
   9499 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   9500 		opc |= SPI_OPC_A8;
   9501 
   9502 	wm_eeprom_sendbits(sc, opc, 8);
   9503 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   9504 
   9505 	for (i = 0; i < wordcnt; i++) {
   9506 		wm_eeprom_recvbits(sc, &val, 16);
   9507 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   9508 	}
   9509 
   9510 	/* Raise CS and clear SK. */
   9511 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   9512 	CSR_WRITE(sc, WMREG_EECD, reg);
   9513 	CSR_WRITE_FLUSH(sc);
   9514 	delay(2);
   9515 
   9516 	return 0;
   9517 }
   9518 
    9519 /* Using the EERD register */
   9520 
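         /*
          * wm_poll_eerd_eewr_done:
          *
          *	Poll the DONE bit of the EERD or EEWR register selected by "rw".
          *	Returns 0 when the operation completes, -1 on timeout.
          */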
   9521 static int
   9522 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   9523 {
   9524 	uint32_t attempts = 100000;
   9525 	uint32_t i, reg = 0;
   9526 	int32_t done = -1;
   9527 
   9528 	for (i = 0; i < attempts; i++) {
   9529 		reg = CSR_READ(sc, rw);
   9530 
   9531 		if (reg & EERD_DONE) {
   9532 			done = 0;
   9533 			break;
   9534 		}
   9535 		delay(5);
   9536 	}
   9537 
   9538 	return done;
   9539 }
   9540 
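         /*
          * wm_nvm_read_eerd:
          *
          *	Read words from the EEPROM, one at a time, through the EERD
          *	register.
          */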
   9541 static int
   9542 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   9543     uint16_t *data)
   9544 {
   9545 	int i, eerd = 0;
   9546 	int error = 0;
   9547 
   9548 	for (i = 0; i < wordcnt; i++) {
   9549 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   9550 
   9551 		CSR_WRITE(sc, WMREG_EERD, eerd);
   9552 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   9553 		if (error != 0)
   9554 			break;
   9555 
   9556 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   9557 	}
   9558 
   9559 	return error;
   9560 }
   9561 
   9562 /* Flash */
   9563 
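         /*
          * wm_nvm_valid_bank_detect_ich8lan:
          *
          *	Detect which of the two flash NVM banks holds a valid image,
          *	using EECD_SEC1VAL where available (ICH8/ICH9) and falling back
          *	to checking each bank's signature byte.
          */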
   9564 static int
   9565 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   9566 {
   9567 	uint32_t eecd;
   9568 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   9569 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   9570 	uint8_t sig_byte = 0;
   9571 
   9572 	switch (sc->sc_type) {
   9573 	case WM_T_ICH8:
   9574 	case WM_T_ICH9:
   9575 		eecd = CSR_READ(sc, WMREG_EECD);
   9576 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   9577 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   9578 			return 0;
   9579 		}
   9580 		/* FALLTHROUGH */
   9581 	default:
   9582 		/* Default to 0 */
   9583 		*bank = 0;
   9584 
   9585 		/* Check bank 0 */
   9586 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   9587 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   9588 			*bank = 0;
   9589 			return 0;
   9590 		}
   9591 
   9592 		/* Check bank 1 */
   9593 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   9594 		    &sig_byte);
   9595 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   9596 			*bank = 1;
   9597 			return 0;
   9598 		}
   9599 	}
   9600 
   9601 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   9602 		device_xname(sc->sc_dev)));
   9603 	return -1;
   9604 }
   9605 
   9606 /******************************************************************************
   9607  * This function does initial flash setup so that a new read/write/erase cycle
   9608  * can be started.
   9609  *
   9610  * sc - The pointer to the hw structure
   9611  ****************************************************************************/
   9612 static int32_t
   9613 wm_ich8_cycle_init(struct wm_softc *sc)
   9614 {
   9615 	uint16_t hsfsts;
   9616 	int32_t error = 1;
   9617 	int32_t i     = 0;
   9618 
   9619 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   9620 
    9621 	/* Maybe we should check the Flash Descriptor Valid bit in HW status */
   9622 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   9623 		return error;
   9624 	}
   9625 
    9626 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   9628 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   9629 
   9630 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   9631 
    9632 	/*
    9633 	 * Either we should have a hardware SPI cycle-in-progress bit to
    9634 	 * check against in order to start a new cycle, or the FDONE bit
    9635 	 * should be changed in the hardware so that it is 1 after hardware
    9636 	 * reset, which can then be used to tell whether a cycle is in
    9637 	 * progress or has completed.  We should also have some software
    9638 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    9639 	 * so that two threads' accesses to those bits are serialized, or
    9640 	 * some way to keep two threads from starting a cycle at once.
    9641 	 */
   9642 
   9643 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   9644 		/*
   9645 		 * There is no cycle running at present, so we can start a
   9646 		 * cycle
   9647 		 */
   9648 
   9649 		/* Begin by setting Flash Cycle Done. */
   9650 		hsfsts |= HSFSTS_DONE;
   9651 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   9652 		error = 0;
   9653 	} else {
   9654 		/*
    9655 		 * Otherwise poll for some time so the current cycle has a
    9656 		 * chance to end before giving up.
   9657 		 */
   9658 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   9659 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   9660 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   9661 				error = 0;
   9662 				break;
   9663 			}
   9664 			delay(1);
   9665 		}
   9666 		if (error == 0) {
   9667 			/*
    9668 			 * The previous cycle ended within the timeout;
    9669 			 * now set the Flash Cycle Done.
   9670 			 */
   9671 			hsfsts |= HSFSTS_DONE;
   9672 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   9673 		}
   9674 	}
   9675 	return error;
   9676 }
   9677 
   9678 /******************************************************************************
   9679  * This function starts a flash cycle and waits for its completion
   9680  *
   9681  * sc - The pointer to the hw structure
   9682  ****************************************************************************/
   9683 static int32_t
   9684 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   9685 {
   9686 	uint16_t hsflctl;
   9687 	uint16_t hsfsts;
   9688 	int32_t error = 1;
   9689 	uint32_t i = 0;
   9690 
   9691 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   9692 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   9693 	hsflctl |= HSFCTL_GO;
   9694 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   9695 
   9696 	/* Wait till FDONE bit is set to 1 */
   9697 	do {
   9698 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   9699 		if (hsfsts & HSFSTS_DONE)
   9700 			break;
   9701 		delay(1);
   9702 		i++;
   9703 	} while (i < timeout);
    9704 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   9705 		error = 0;
   9706 
   9707 	return error;
   9708 }
   9709 
   9710 /******************************************************************************
   9711  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   9712  *
   9713  * sc - The pointer to the hw structure
   9714  * index - The index of the byte or word to read.
   9715  * size - Size of data to read, 1=byte 2=word
   9716  * data - Pointer to the word to store the value read.
   9717  *****************************************************************************/
   9718 static int32_t
   9719 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   9720     uint32_t size, uint16_t *data)
   9721 {
   9722 	uint16_t hsfsts;
   9723 	uint16_t hsflctl;
   9724 	uint32_t flash_linear_address;
   9725 	uint32_t flash_data = 0;
   9726 	int32_t error = 1;
   9727 	int32_t count = 0;
   9728 
    9729 	if (size < 1 || size > 2 || data == NULL ||
   9730 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   9731 		return error;
   9732 
   9733 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   9734 	    sc->sc_ich8_flash_base;
   9735 
   9736 	do {
   9737 		delay(1);
   9738 		/* Steps */
   9739 		error = wm_ich8_cycle_init(sc);
   9740 		if (error)
   9741 			break;
   9742 
   9743 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   9744 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   9745 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   9746 		    & HSFCTL_BCOUNT_MASK;
   9747 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   9748 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   9749 
   9750 		/*
   9751 		 * Write the last 24 bits of index into Flash Linear address
   9752 		 * field in Flash Address
   9753 		 */
   9754 		/* TODO: TBD maybe check the index against the size of flash */
   9755 
   9756 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   9757 
   9758 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   9759 
   9760 		/*
    9761 		 * Check if FCERR is set to 1.  If it is, clear it and try
    9762 		 * the whole sequence a few more times; otherwise read in
    9763 		 * (shift in) Flash Data0, least significant byte first.
   9765 		 */
   9766 		if (error == 0) {
   9767 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   9768 			if (size == 1)
   9769 				*data = (uint8_t)(flash_data & 0x000000FF);
   9770 			else if (size == 2)
   9771 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   9772 			break;
   9773 		} else {
   9774 			/*
   9775 			 * If we've gotten here, then things are probably
   9776 			 * completely hosed, but if the error condition is
   9777 			 * detected, it won't hurt to give it another try...
   9778 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   9779 			 */
   9780 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   9781 			if (hsfsts & HSFSTS_ERR) {
   9782 				/* Repeat for some time before giving up. */
   9783 				continue;
   9784 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   9785 				break;
   9786 		}
   9787 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   9788 
   9789 	return error;
   9790 }
   9791 
   9792 /******************************************************************************
   9793  * Reads a single byte from the NVM using the ICH8 flash access registers.
   9794  *
   9795  * sc - pointer to wm_hw structure
   9796  * index - The index of the byte to read.
   9797  * data - Pointer to a byte to store the value read.
   9798  *****************************************************************************/
   9799 static int32_t
   9800 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   9801 {
   9802 	int32_t status;
   9803 	uint16_t word = 0;
   9804 
   9805 	status = wm_read_ich8_data(sc, index, 1, &word);
   9806 	if (status == 0)
   9807 		*data = (uint8_t)word;
   9808 	else
   9809 		*data = 0;
   9810 
   9811 	return status;
   9812 }
   9813 
   9814 /******************************************************************************
   9815  * Reads a word from the NVM using the ICH8 flash access registers.
   9816  *
    9817  * sc - pointer to the wm_softc structure
   9818  * index - The starting byte index of the word to read.
   9819  * data - Pointer to a word to store the value read.
   9820  *****************************************************************************/
   9821 static int32_t
   9822 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   9823 {
   9824 	int32_t status;
   9825 
   9826 	status = wm_read_ich8_data(sc, index, 2, data);
   9827 	return status;
   9828 }
   9829 
   9830 /******************************************************************************
   9831  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   9832  * register.
   9833  *
   9834  * sc - Struct containing variables accessed by shared code
   9835  * offset - offset of word in the EEPROM to read
   9836  * data - word read from the EEPROM
   9837  * words - number of words to read
   9838  *****************************************************************************/
   9839 static int
   9840 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   9841 {
   9842 	int32_t  error = 0;
   9843 	uint32_t flash_bank = 0;
   9844 	uint32_t act_offset = 0;
   9845 	uint32_t bank_offset = 0;
   9846 	uint16_t word = 0;
   9847 	uint16_t i = 0;
   9848 
   9849 	/*
   9850 	 * We need to know which is the valid flash bank.  In the event
   9851 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   9852 	 * managing flash_bank.  So it cannot be trusted and needs
   9853 	 * to be updated with each read.
   9854 	 */
   9855 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   9856 	if (error) {
   9857 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   9858 			device_xname(sc->sc_dev)));
   9859 		flash_bank = 0;
   9860 	}
   9861 
    9862 	/*
    9863 	 * Adjust the offset if we're on bank 1; the bank size is kept
    9864 	 * in words, so multiply by 2 for a byte offset.
    9865 	 */
   9866 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   9867 
   9868 	error = wm_get_swfwhw_semaphore(sc);
   9869 	if (error) {
   9870 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9871 		    __func__);
   9872 		return error;
   9873 	}
   9874 
   9875 	for (i = 0; i < words; i++) {
   9876 		/* The NVM part needs a byte offset, hence * 2 */
   9877 		act_offset = bank_offset + ((offset + i) * 2);
   9878 		error = wm_read_ich8_word(sc, act_offset, &word);
   9879 		if (error) {
   9880 			aprint_error_dev(sc->sc_dev,
   9881 			    "%s: failed to read NVM\n", __func__);
   9882 			break;
   9883 		}
   9884 		data[i] = word;
   9885 	}
   9886 
   9887 	wm_put_swfwhw_semaphore(sc);
   9888 	return error;
   9889 }
   9890 
   9891 /* iNVM */
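/*
 * The i210/i211 iNVM is a small bank of one-time-programmable dwords
 * read through WM_INVM_DATA_REG().  Each dword carries a record type;
 * word-autoload records map an NVM word address to a data word, and
 * that mapping is what wm_nvm_read_word_invm() scans the bank for.
 */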
   9892 
   9893 static int
   9894 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   9895 {
    9896 	int32_t  rv = -1;	/* fail unless the word is found */
   9897 	uint32_t invm_dword;
   9898 	uint16_t i;
   9899 	uint8_t record_type, word_address;
   9900 
   9901 	for (i = 0; i < INVM_SIZE; i++) {
   9902 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   9903 		/* Get record type */
   9904 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   9905 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   9906 			break;
   9907 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   9908 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   9909 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   9910 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   9911 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   9912 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   9913 			if (word_address == address) {
   9914 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   9915 				rv = 0;
   9916 				break;
   9917 			}
   9918 		}
   9919 	}
   9920 
   9921 	return rv;
   9922 }
   9923 
   9924 static int
   9925 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   9926 {
   9927 	int rv = 0;
   9928 	int i;
   9929 
   9930 	for (i = 0; i < words; i++) {
   9931 		switch (offset + i) {
   9932 		case NVM_OFF_MACADDR:
   9933 		case NVM_OFF_MACADDR1:
   9934 		case NVM_OFF_MACADDR2:
   9935 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   9936 			if (rv != 0) {
   9937 				data[i] = 0xffff;
   9938 				rv = -1;
   9939 			}
   9940 			break;
   9941 		case NVM_OFF_CFG2:
   9942 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9943 			if (rv != 0) {
   9944 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   9945 				rv = 0;
   9946 			}
   9947 			break;
   9948 		case NVM_OFF_CFG4:
   9949 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9950 			if (rv != 0) {
   9951 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   9952 				rv = 0;
   9953 			}
   9954 			break;
   9955 		case NVM_OFF_LED_1_CFG:
   9956 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9957 			if (rv != 0) {
   9958 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   9959 				rv = 0;
   9960 			}
   9961 			break;
   9962 		case NVM_OFF_LED_0_2_CFG:
   9963 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9964 			if (rv != 0) {
   9965 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   9966 				rv = 0;
   9967 			}
   9968 			break;
   9969 		case NVM_OFF_ID_LED_SETTINGS:
   9970 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9971 			if (rv != 0) {
   9972 				*data = ID_LED_RESERVED_FFFF;
   9973 				rv = 0;
   9974 			}
   9975 			break;
   9976 		default:
   9977 			DPRINTF(WM_DEBUG_NVM,
   9978 			    ("NVM word 0x%02x is not mapped.\n", offset));
   9979 			*data = NVM_RESERVED_WORD;
   9980 			break;
   9981 		}
   9982 	}
   9983 
   9984 	return rv;
   9985 }
   9986 
    9987 /* Locking, NVM type detection, checksum validation, versioning and read */
   9988 
   9989 /*
   9990  * wm_nvm_acquire:
   9991  *
   9992  *	Perform the EEPROM handshake required on some chips.
   9993  */
   9994 static int
   9995 wm_nvm_acquire(struct wm_softc *sc)
   9996 {
   9997 	uint32_t reg;
   9998 	int x;
   9999 	int ret = 0;
   10000 
    10001 	/* Flash-type NVM needs no handshake; always succeeds */
   10002 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10003 		return 0;
   10004 
   10005 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10006 		ret = wm_get_swfwhw_semaphore(sc);
   10007 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10008 		/* This will also do wm_get_swsm_semaphore() if needed */
   10009 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10010 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10011 		ret = wm_get_swsm_semaphore(sc);
   10012 	}
   10013 
   10014 	if (ret) {
   10015 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10016 			__func__);
   10017 		return 1;
   10018 	}
   10019 
   10020 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10021 		reg = CSR_READ(sc, WMREG_EECD);
   10022 
   10023 		/* Request EEPROM access. */
   10024 		reg |= EECD_EE_REQ;
   10025 		CSR_WRITE(sc, WMREG_EECD, reg);
   10026 
   10027 		/* ..and wait for it to be granted. */
   10028 		for (x = 0; x < 1000; x++) {
   10029 			reg = CSR_READ(sc, WMREG_EECD);
   10030 			if (reg & EECD_EE_GNT)
   10031 				break;
   10032 			delay(5);
   10033 		}
   10034 		if ((reg & EECD_EE_GNT) == 0) {
   10035 			aprint_error_dev(sc->sc_dev,
   10036 			    "could not acquire EEPROM GNT\n");
   10037 			reg &= ~EECD_EE_REQ;
   10038 			CSR_WRITE(sc, WMREG_EECD, reg);
   10039 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10040 				wm_put_swfwhw_semaphore(sc);
   10041 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10042 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10043 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10044 				wm_put_swsm_semaphore(sc);
   10045 			return 1;
   10046 		}
   10047 	}
   10048 
   10049 	return 0;
   10050 }
   10051 
   10052 /*
   10053  * wm_nvm_release:
   10054  *
   10055  *	Release the EEPROM mutex.
   10056  */
   10057 static void
   10058 wm_nvm_release(struct wm_softc *sc)
   10059 {
   10060 	uint32_t reg;
   10061 
    10062 	/* Flash-type NVM needed no handshake; nothing to release */
   10063 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10064 		return;
   10065 
   10066 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10067 		reg = CSR_READ(sc, WMREG_EECD);
   10068 		reg &= ~EECD_EE_REQ;
   10069 		CSR_WRITE(sc, WMREG_EECD, reg);
   10070 	}
   10071 
   10072 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10073 		wm_put_swfwhw_semaphore(sc);
   10074 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10075 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10076 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10077 		wm_put_swsm_semaphore(sc);
   10078 }
   10079 
   10080 static int
   10081 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10082 {
   10083 	uint32_t eecd = 0;
   10084 
   10085 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10086 	    || sc->sc_type == WM_T_82583) {
   10087 		eecd = CSR_READ(sc, WMREG_EECD);
   10088 
   10089 		/* Isolate bits 15 & 16 */
   10090 		eecd = ((eecd >> 15) & 0x03);
   10091 
   10092 		/* If both bits are set, device is Flash type */
   10093 		if (eecd == 0x03)
   10094 			return 0;
   10095 	}
   10096 	return 1;
   10097 }
   10098 
   10099 static int
   10100 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10101 {
   10102 	uint32_t eec;
   10103 
   10104 	eec = CSR_READ(sc, WMREG_EEC);
   10105 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10106 		return 1;
   10107 
   10108 	return 0;
   10109 }
   10110 
   10111 /*
   10112  * wm_nvm_validate_checksum
   10113  *
    10114  * The checksum is the sum of the first 64 16-bit words; it must equal NVM_CHECKSUM.
   10115  */
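/*
 * The vendor programs the checksum word (conventionally the last of
 * the 64, word 0x3f) so that the 16-bit sum wraps to exactly
 * NVM_CHECKSUM (0xBABA).  Worked example: if words 0x00-0x3e sum to
 * 0x1234, the checksum word must hold 0xbaba - 0x1234 = 0xa886.
 */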
   10116 static int
   10117 wm_nvm_validate_checksum(struct wm_softc *sc)
   10118 {
   10119 	uint16_t checksum;
   10120 	uint16_t eeprom_data;
   10121 #ifdef WM_DEBUG
   10122 	uint16_t csum_wordaddr, valid_checksum;
   10123 #endif
   10124 	int i;
   10125 
   10126 	checksum = 0;
   10127 
   10128 	/* Don't check for I211 */
   10129 	if (sc->sc_type == WM_T_I211)
   10130 		return 0;
   10131 
   10132 #ifdef WM_DEBUG
   10133 	if (sc->sc_type == WM_T_PCH_LPT) {
   10134 		csum_wordaddr = NVM_OFF_COMPAT;
   10135 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10136 	} else {
   10137 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10138 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10139 	}
   10140 
   10141 	/* Dump EEPROM image for debug */
   10142 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10143 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10144 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10145 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10146 		if ((eeprom_data & valid_checksum) == 0) {
   10147 			DPRINTF(WM_DEBUG_NVM,
   10148 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10149 				device_xname(sc->sc_dev), eeprom_data,
   10150 				    valid_checksum));
   10151 		}
   10152 	}
   10153 
   10154 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10155 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10156 		for (i = 0; i < NVM_SIZE; i++) {
   10157 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10158 				printf("XXXX ");
   10159 			else
   10160 				printf("%04hx ", eeprom_data);
   10161 			if (i % 8 == 7)
   10162 				printf("\n");
   10163 		}
   10164 	}
   10165 
   10166 #endif /* WM_DEBUG */
   10167 
   10168 	for (i = 0; i < NVM_SIZE; i++) {
   10169 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10170 			return 1;
   10171 		checksum += eeprom_data;
   10172 	}
   10173 
   10174 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10175 #ifdef WM_DEBUG
   10176 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10177 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10178 #endif
   10179 	}
   10180 
   10181 	return 0;
   10182 }
   10183 
   10184 static void
   10185 wm_nvm_version_invm(struct wm_softc *sc)
   10186 {
   10187 	uint32_t dword;
   10188 
    10189 	/*
    10190 	 * Linux's version-decoding code is very strange, so we don't
    10191 	 * follow that algorithm and instead use word 61 as the
    10192 	 * documentation describes.  It may not be perfect, though.
    10193 	 *
    10194 	 * Example:
    10195 	 *
    10196 	 *   Word61: 00800030 -> version 0.6 (the I211 spec update mentions 0.6)
    10197 	 */
   10198 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10199 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10200 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10201 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10202 }
   10203 
   10204 static void
   10205 wm_nvm_version(struct wm_softc *sc)
   10206 {
   10207 	uint16_t major, minor, build, patch;
   10208 	uint16_t uid0, uid1;
   10209 	uint16_t nvm_data;
   10210 	uint16_t off;
   10211 	bool check_version = false;
   10212 	bool check_optionrom = false;
   10213 	bool have_build = false;
   10214 
   10215 	/*
   10216 	 * Version format:
   10217 	 *
   10218 	 * XYYZ
   10219 	 * X0YZ
   10220 	 * X0YY
   10221 	 *
   10222 	 * Example:
   10223 	 *
    10224 	 *	82571	0x50a2	5.10.2?	(the spec update mentions 5.6-5.10)
    10225 	 *	82571	0x50a6	5.10.6?
    10226 	 *	82572	0x506a	5.6.10?
    10227 	 *	82572EI	0x5069	5.6.9?
    10228 	 *	82574L	0x1080	1.8.0?	(the spec update mentions 2.1.4)
    10229 	 *		0x2013	2.1.3?
    10230 	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
   10231 	 */
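	/*
	 * A worked example of the decoding done below, assuming the
	 * usual field layout (major in the top nibble, minor in the
	 * middle byte, build in the low nibble): 0x50a2 gives major 5,
	 * a minor field of 0x0a that the hex-coded-decimal conversion
	 * turns into (0x0a / 16) * 10 + (0x0a % 16) = 10, and build 2,
	 * i.e. version 5.10.2.
	 */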
   10232 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10233 	switch (sc->sc_type) {
   10234 	case WM_T_82571:
   10235 	case WM_T_82572:
   10236 	case WM_T_82574:
   10237 	case WM_T_82583:
   10238 		check_version = true;
   10239 		check_optionrom = true;
   10240 		have_build = true;
   10241 		break;
   10242 	case WM_T_82575:
   10243 	case WM_T_82576:
   10244 	case WM_T_82580:
   10245 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10246 			check_version = true;
   10247 		break;
   10248 	case WM_T_I211:
   10249 		wm_nvm_version_invm(sc);
   10250 		goto printver;
   10251 	case WM_T_I210:
   10252 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10253 			wm_nvm_version_invm(sc);
   10254 			goto printver;
   10255 		}
   10256 		/* FALLTHROUGH */
   10257 	case WM_T_I350:
   10258 	case WM_T_I354:
   10259 		check_version = true;
   10260 		check_optionrom = true;
   10261 		break;
   10262 	default:
   10263 		return;
   10264 	}
   10265 	if (check_version) {
   10266 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10267 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10268 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10269 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10270 			build = nvm_data & NVM_BUILD_MASK;
   10271 			have_build = true;
   10272 		} else
   10273 			minor = nvm_data & 0x00ff;
   10274 
   10275 		/* Decimal */
   10276 		minor = (minor / 16) * 10 + (minor % 16);
   10277 		sc->sc_nvm_ver_major = major;
   10278 		sc->sc_nvm_ver_minor = minor;
   10279 
   10280 printver:
   10281 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10282 		    sc->sc_nvm_ver_minor);
   10283 		if (have_build) {
   10284 			sc->sc_nvm_ver_build = build;
   10285 			aprint_verbose(".%d", build);
   10286 		}
   10287 	}
   10288 	if (check_optionrom) {
   10289 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10290 		/* Option ROM Version */
   10291 		if ((off != 0x0000) && (off != 0xffff)) {
   10292 			off += NVM_COMBO_VER_OFF;
   10293 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10294 			wm_nvm_read(sc, off, 1, &uid0);
   10295 			if ((uid0 != 0) && (uid0 != 0xffff)
   10296 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10297 				/* 16bits */
   10298 				major = uid0 >> 8;
   10299 				build = (uid0 << 8) | (uid1 >> 8);
   10300 				patch = uid1 & 0x00ff;
   10301 				aprint_verbose(", option ROM Version %d.%d.%d",
   10302 				    major, build, patch);
   10303 			}
   10304 		}
   10305 	}
   10306 
   10307 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10308 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10309 }
   10310 
   10311 /*
   10312  * wm_nvm_read:
   10313  *
   10314  *	Read data from the serial EEPROM.
   10315  */
   10316 static int
   10317 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10318 {
   10319 	int rv;
   10320 
   10321 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10322 		return 1;
   10323 
   10324 	if (wm_nvm_acquire(sc))
   10325 		return 1;
   10326 
   10327 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10328 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10329 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   10330 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   10331 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   10332 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   10333 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   10334 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   10335 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   10336 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   10337 	else
   10338 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   10339 
   10340 	wm_nvm_release(sc);
   10341 	return rv;
   10342 }
   10343 
   10344 /*
   10345  * Hardware semaphores.
    10346  * Very complex...
   10347  */
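/*
 * Three layers are involved, depending on the chip: the SWSM SMBI bit
 * (the software semaphore), the SWSM SWESMBI bit (the software/firmware
 * semaphore) and the per-resource bits in SW_FW_SYNC.  ICH/PCH parts
 * instead take MDIO ownership through EXTCNFCTR, and the 82573 has its
 * own variant of that scheme.
 */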
   10348 
   10349 static int
   10350 wm_get_swsm_semaphore(struct wm_softc *sc)
   10351 {
   10352 	int32_t timeout;
   10353 	uint32_t swsm;
   10354 
   10355 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10356 		/* Get the SW semaphore. */
   10357 		timeout = sc->sc_nvm_wordsize + 1;
   10358 		while (timeout) {
   10359 			swsm = CSR_READ(sc, WMREG_SWSM);
   10360 
   10361 			if ((swsm & SWSM_SMBI) == 0)
   10362 				break;
   10363 
   10364 			delay(50);
   10365 			timeout--;
   10366 		}
   10367 
   10368 		if (timeout == 0) {
   10369 			aprint_error_dev(sc->sc_dev,
   10370 			    "could not acquire SWSM SMBI\n");
   10371 			return 1;
   10372 		}
   10373 	}
   10374 
   10375 	/* Get the FW semaphore. */
   10376 	timeout = sc->sc_nvm_wordsize + 1;
   10377 	while (timeout) {
   10378 		swsm = CSR_READ(sc, WMREG_SWSM);
   10379 		swsm |= SWSM_SWESMBI;
   10380 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   10381 		/* If we managed to set the bit we got the semaphore. */
   10382 		swsm = CSR_READ(sc, WMREG_SWSM);
   10383 		if (swsm & SWSM_SWESMBI)
   10384 			break;
   10385 
   10386 		delay(50);
   10387 		timeout--;
   10388 	}
   10389 
   10390 	if (timeout == 0) {
   10391 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   10392 		/* Release semaphores */
   10393 		wm_put_swsm_semaphore(sc);
   10394 		return 1;
   10395 	}
   10396 	return 0;
   10397 }
   10398 
   10399 static void
   10400 wm_put_swsm_semaphore(struct wm_softc *sc)
   10401 {
   10402 	uint32_t swsm;
   10403 
   10404 	swsm = CSR_READ(sc, WMREG_SWSM);
   10405 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   10406 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   10407 }
   10408 
   10409 static int
   10410 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10411 {
   10412 	uint32_t swfw_sync;
   10413 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   10414 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    10415 	int timeout;
   10416 
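	/*
	 * Protocol: take the SWSM semaphore, check that neither the SW
	 * nor the FW bit for this resource is set in SW_FW_SYNC, claim
	 * the resource by setting our SW bit, and drop the SWSM
	 * semaphore again either way; when the resource is busy, wait
	 * and retry.
	 */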
   10417 	for (timeout = 0; timeout < 200; timeout++) {
   10418 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10419 			if (wm_get_swsm_semaphore(sc)) {
   10420 				aprint_error_dev(sc->sc_dev,
   10421 				    "%s: failed to get semaphore\n",
   10422 				    __func__);
   10423 				return 1;
   10424 			}
   10425 		}
   10426 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10427 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   10428 			swfw_sync |= swmask;
   10429 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10430 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   10431 				wm_put_swsm_semaphore(sc);
   10432 			return 0;
   10433 		}
   10434 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   10435 			wm_put_swsm_semaphore(sc);
   10436 		delay(5000);
   10437 	}
   10438 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   10439 	    device_xname(sc->sc_dev), mask, swfw_sync);
   10440 	return 1;
   10441 }
   10442 
   10443 static void
   10444 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10445 {
   10446 	uint32_t swfw_sync;
   10447 
   10448 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10449 		while (wm_get_swsm_semaphore(sc) != 0)
   10450 			continue;
   10451 	}
   10452 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10453 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   10454 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10455 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   10456 		wm_put_swsm_semaphore(sc);
   10457 }
   10458 
   10459 static int
   10460 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   10461 {
   10462 	uint32_t ext_ctrl;
    10463 	int timeout;
   10464 
   10465 	for (timeout = 0; timeout < 200; timeout++) {
   10466 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10467 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10468 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10469 
   10470 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10471 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   10472 			return 0;
   10473 		delay(5000);
   10474 	}
   10475 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   10476 	    device_xname(sc->sc_dev), ext_ctrl);
   10477 	return 1;
   10478 }
   10479 
   10480 static void
   10481 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   10482 {
   10483 	uint32_t ext_ctrl;
   10484 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10485 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10486 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10487 }
   10488 
   10489 static int
   10490 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   10491 {
   10492 	int i = 0;
   10493 	uint32_t reg;
   10494 
   10495 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10496 	do {
   10497 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   10498 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   10499 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10500 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   10501 			break;
   10502 		delay(2*1000);
   10503 		i++;
   10504 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   10505 
   10506 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   10507 		wm_put_hw_semaphore_82573(sc);
   10508 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   10509 		    device_xname(sc->sc_dev));
   10510 		return -1;
   10511 	}
   10512 
   10513 	return 0;
   10514 }
   10515 
   10516 static void
   10517 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   10518 {
   10519 	uint32_t reg;
   10520 
   10521 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10522 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10523 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   10524 }
   10525 
   10526 /*
   10527  * Management mode and power management related subroutines.
   10528  * BMC, AMT, suspend/resume and EEE.
   10529  */
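/*
 * "Management" here means a BMC (Baseboard Management Controller) or
 * Intel AMT firmware sharing the NIC with the host.  FWSM, MANC and
 * the NVM config words tell the driver whether such firmware is
 * present, so that it can keep pass-through traffic flowing and avoid
 * resetting the PHY out from under the firmware.
 */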
   10530 
   10531 static int
   10532 wm_check_mng_mode(struct wm_softc *sc)
   10533 {
   10534 	int rv;
   10535 
   10536 	switch (sc->sc_type) {
   10537 	case WM_T_ICH8:
   10538 	case WM_T_ICH9:
   10539 	case WM_T_ICH10:
   10540 	case WM_T_PCH:
   10541 	case WM_T_PCH2:
   10542 	case WM_T_PCH_LPT:
   10543 		rv = wm_check_mng_mode_ich8lan(sc);
   10544 		break;
   10545 	case WM_T_82574:
   10546 	case WM_T_82583:
   10547 		rv = wm_check_mng_mode_82574(sc);
   10548 		break;
   10549 	case WM_T_82571:
   10550 	case WM_T_82572:
   10551 	case WM_T_82573:
   10552 	case WM_T_80003:
   10553 		rv = wm_check_mng_mode_generic(sc);
   10554 		break;
   10555 	default:
    10556 		/* nothing to do */
   10557 		rv = 0;
   10558 		break;
   10559 	}
   10560 
   10561 	return rv;
   10562 }
   10563 
   10564 static int
   10565 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   10566 {
   10567 	uint32_t fwsm;
   10568 
   10569 	fwsm = CSR_READ(sc, WMREG_FWSM);
   10570 
   10571 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   10572 		return 1;
   10573 
   10574 	return 0;
   10575 }
   10576 
   10577 static int
   10578 wm_check_mng_mode_82574(struct wm_softc *sc)
   10579 {
   10580 	uint16_t data;
   10581 
   10582 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   10583 
   10584 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   10585 		return 1;
   10586 
   10587 	return 0;
   10588 }
   10589 
   10590 static int
   10591 wm_check_mng_mode_generic(struct wm_softc *sc)
   10592 {
   10593 	uint32_t fwsm;
   10594 
   10595 	fwsm = CSR_READ(sc, WMREG_FWSM);
   10596 
   10597 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   10598 		return 1;
   10599 
   10600 	return 0;
   10601 }
   10602 
   10603 static int
   10604 wm_enable_mng_pass_thru(struct wm_softc *sc)
   10605 {
   10606 	uint32_t manc, fwsm, factps;
   10607 
   10608 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   10609 		return 0;
   10610 
   10611 	manc = CSR_READ(sc, WMREG_MANC);
   10612 
   10613 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   10614 		device_xname(sc->sc_dev), manc));
   10615 	if ((manc & MANC_RECV_TCO_EN) == 0)
   10616 		return 0;
   10617 
   10618 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   10619 		fwsm = CSR_READ(sc, WMREG_FWSM);
   10620 		factps = CSR_READ(sc, WMREG_FACTPS);
   10621 		if (((factps & FACTPS_MNGCG) == 0)
   10622 		    && ((fwsm & FWSM_MODE_MASK)
   10623 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   10624 			return 1;
   10625 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10626 		uint16_t data;
   10627 
   10628 		factps = CSR_READ(sc, WMREG_FACTPS);
   10629 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   10630 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   10631 			device_xname(sc->sc_dev), factps, data));
   10632 		if (((factps & FACTPS_MNGCG) == 0)
   10633 		    && ((data & NVM_CFG2_MNGM_MASK)
   10634 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   10635 			return 1;
   10636 	} else if (((manc & MANC_SMBUS_EN) != 0)
   10637 	    && ((manc & MANC_ASF_EN) == 0))
   10638 		return 1;
   10639 
   10640 	return 0;
   10641 }
   10642 
   10643 static int
   10644 wm_check_reset_block(struct wm_softc *sc)
   10645 {
   10646 	uint32_t reg;
   10647 
   10648 	switch (sc->sc_type) {
   10649 	case WM_T_ICH8:
   10650 	case WM_T_ICH9:
   10651 	case WM_T_ICH10:
   10652 	case WM_T_PCH:
   10653 	case WM_T_PCH2:
   10654 	case WM_T_PCH_LPT:
   10655 		reg = CSR_READ(sc, WMREG_FWSM);
   10656 		if ((reg & FWSM_RSPCIPHY) != 0)
   10657 			return 0;
   10658 		else
   10659 			return -1;
   10660 		break;
   10661 	case WM_T_82571:
   10662 	case WM_T_82572:
   10663 	case WM_T_82573:
   10664 	case WM_T_82574:
   10665 	case WM_T_82583:
   10666 	case WM_T_80003:
   10667 		reg = CSR_READ(sc, WMREG_MANC);
   10668 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   10669 			return -1;
   10670 		else
   10671 			return 0;
   10672 		break;
   10673 	default:
   10674 		/* no problem */
   10675 		break;
   10676 	}
   10677 
   10678 	return 0;
   10679 }
   10680 
   10681 static void
   10682 wm_get_hw_control(struct wm_softc *sc)
   10683 {
   10684 	uint32_t reg;
   10685 
   10686 	switch (sc->sc_type) {
   10687 	case WM_T_82573:
   10688 		reg = CSR_READ(sc, WMREG_SWSM);
   10689 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   10690 		break;
   10691 	case WM_T_82571:
   10692 	case WM_T_82572:
   10693 	case WM_T_82574:
   10694 	case WM_T_82583:
   10695 	case WM_T_80003:
   10696 	case WM_T_ICH8:
   10697 	case WM_T_ICH9:
   10698 	case WM_T_ICH10:
   10699 	case WM_T_PCH:
   10700 	case WM_T_PCH2:
   10701 	case WM_T_PCH_LPT:
   10702 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10703 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   10704 		break;
   10705 	default:
   10706 		break;
   10707 	}
   10708 }
   10709 
   10710 static void
   10711 wm_release_hw_control(struct wm_softc *sc)
   10712 {
   10713 	uint32_t reg;
   10714 
   10715 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   10716 		return;
   10717 
   10718 	if (sc->sc_type == WM_T_82573) {
   10719 		reg = CSR_READ(sc, WMREG_SWSM);
    10720 		reg &= ~SWSM_DRV_LOAD;
    10721 		CSR_WRITE(sc, WMREG_SWSM, reg);
   10722 	} else {
   10723 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10724 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   10725 	}
   10726 }
   10727 
   10728 static void
   10729 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   10730 {
   10731 	uint32_t reg;
   10732 
   10733 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10734 
   10735 	if (on != 0)
   10736 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   10737 	else
   10738 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   10739 
   10740 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   10741 }
   10742 
   10743 static void
   10744 wm_smbustopci(struct wm_softc *sc)
   10745 {
   10746 	uint32_t fwsm;
   10747 
   10748 	fwsm = CSR_READ(sc, WMREG_FWSM);
   10749 	if (((fwsm & FWSM_FW_VALID) == 0)
   10750 	    && ((wm_check_reset_block(sc) == 0))) {
   10751 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   10752 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   10753 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10754 		CSR_WRITE_FLUSH(sc);
   10755 		delay(10);
   10756 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   10757 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10758 		CSR_WRITE_FLUSH(sc);
   10759 		delay(50*1000);
   10760 
   10761 		/*
   10762 		 * Gate automatic PHY configuration by hardware on non-managed
   10763 		 * 82579
   10764 		 */
   10765 		if (sc->sc_type == WM_T_PCH2)
   10766 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   10767 	}
   10768 }
   10769 
   10770 static void
   10771 wm_init_manageability(struct wm_softc *sc)
   10772 {
   10773 
   10774 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   10775 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   10776 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   10777 
   10778 		/* Disable hardware interception of ARP */
   10779 		manc &= ~MANC_ARP_EN;
   10780 
   10781 		/* Enable receiving management packets to the host */
   10782 		if (sc->sc_type >= WM_T_82571) {
   10783 			manc |= MANC_EN_MNG2HOST;
    10784 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   10785 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   10786 		}
   10787 
   10788 		CSR_WRITE(sc, WMREG_MANC, manc);
   10789 	}
   10790 }
   10791 
   10792 static void
   10793 wm_release_manageability(struct wm_softc *sc)
   10794 {
   10795 
   10796 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   10797 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   10798 
   10799 		manc |= MANC_ARP_EN;
   10800 		if (sc->sc_type >= WM_T_82571)
   10801 			manc &= ~MANC_EN_MNG2HOST;
   10802 
   10803 		CSR_WRITE(sc, WMREG_MANC, manc);
   10804 	}
   10805 }
   10806 
   10807 static void
   10808 wm_get_wakeup(struct wm_softc *sc)
   10809 {
   10810 
   10811 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   10812 	switch (sc->sc_type) {
   10813 	case WM_T_82573:
   10814 	case WM_T_82583:
   10815 		sc->sc_flags |= WM_F_HAS_AMT;
   10816 		/* FALLTHROUGH */
   10817 	case WM_T_80003:
   10818 	case WM_T_82541:
   10819 	case WM_T_82547:
   10820 	case WM_T_82571:
   10821 	case WM_T_82572:
   10822 	case WM_T_82574:
   10823 	case WM_T_82575:
   10824 	case WM_T_82576:
   10825 	case WM_T_82580:
   10826 	case WM_T_I350:
   10827 	case WM_T_I354:
   10828 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   10829 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   10830 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   10831 		break;
   10832 	case WM_T_ICH8:
   10833 	case WM_T_ICH9:
   10834 	case WM_T_ICH10:
   10835 	case WM_T_PCH:
   10836 	case WM_T_PCH2:
   10837 	case WM_T_PCH_LPT:
   10838 		sc->sc_flags |= WM_F_HAS_AMT;
   10839 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   10840 		break;
   10841 	default:
   10842 		break;
   10843 	}
   10844 
   10845 	/* 1: HAS_MANAGE */
   10846 	if (wm_enable_mng_pass_thru(sc) != 0)
   10847 		sc->sc_flags |= WM_F_HAS_MANAGE;
   10848 
   10849 #ifdef WM_DEBUG
   10850 	printf("\n");
   10851 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   10852 		printf("HAS_AMT,");
   10853 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   10854 		printf("ARC_SUBSYS_VALID,");
   10855 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   10856 		printf("ASF_FIRMWARE_PRES,");
   10857 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   10858 		printf("HAS_MANAGE,");
   10859 	printf("\n");
   10860 #endif
    10861 	/*
    10862 	 * Note that the WOL flags are set after the EEPROM-related
    10863 	 * resetting is done.
    10864 	 */
   10865 }
   10866 
   10867 #ifdef WM_WOL
   10868 /* WOL in the newer chipset interfaces (pchlan) */
   10869 static void
   10870 wm_enable_phy_wakeup(struct wm_softc *sc)
   10871 {
   10872 #if 0
   10873 	uint16_t preg;
   10874 
   10875 	/* Copy MAC RARs to PHY RARs */
   10876 
   10877 	/* Copy MAC MTA to PHY MTA */
   10878 
   10879 	/* Configure PHY Rx Control register */
   10880 
   10881 	/* Enable PHY wakeup in MAC register */
   10882 
   10883 	/* Configure and enable PHY wakeup in PHY registers */
   10884 
   10885 	/* Activate PHY wakeup */
   10886 
   10887 	/* XXX */
   10888 #endif
   10889 }
   10890 
   10891 /* Power down workaround on D3 */
   10892 static void
   10893 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   10894 {
   10895 	uint32_t reg;
   10896 	int i;
   10897 
   10898 	for (i = 0; i < 2; i++) {
   10899 		/* Disable link */
   10900 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   10901 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   10902 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   10903 
   10904 		/*
   10905 		 * Call gig speed drop workaround on Gig disable before
   10906 		 * accessing any PHY registers
   10907 		 */
   10908 		if (sc->sc_type == WM_T_ICH8)
   10909 			wm_gig_downshift_workaround_ich8lan(sc);
   10910 
   10911 		/* Write VR power-down enable */
   10912 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   10913 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   10914 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   10915 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   10916 
   10917 		/* Read it back and test */
   10918 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   10919 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   10920 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   10921 			break;
   10922 
   10923 		/* Issue PHY reset and repeat at most one more time */
   10924 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10925 	}
   10926 }
   10927 
   10928 static void
   10929 wm_enable_wakeup(struct wm_softc *sc)
   10930 {
   10931 	uint32_t reg, pmreg;
   10932 	pcireg_t pmode;
   10933 
   10934 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   10935 		&pmreg, NULL) == 0)
   10936 		return;
   10937 
   10938 	/* Advertise the wakeup capability */
   10939 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   10940 	    | CTRL_SWDPIN(3));
   10941 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   10942 
   10943 	/* ICH workaround */
   10944 	switch (sc->sc_type) {
   10945 	case WM_T_ICH8:
   10946 	case WM_T_ICH9:
   10947 	case WM_T_ICH10:
   10948 	case WM_T_PCH:
   10949 	case WM_T_PCH2:
   10950 	case WM_T_PCH_LPT:
   10951 		/* Disable gig during WOL */
   10952 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   10953 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   10954 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   10955 		if (sc->sc_type == WM_T_PCH)
   10956 			wm_gmii_reset(sc);
   10957 
   10958 		/* Power down workaround */
   10959 		if (sc->sc_phytype == WMPHY_82577) {
   10960 			struct mii_softc *child;
   10961 
   10962 			/* Assume that the PHY is copper */
   10963 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10964 			if (child->mii_mpd_rev <= 2)
   10965 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   10966 				    (768 << 5) | 25, 0x0444); /* magic num */
   10967 		}
   10968 		break;
   10969 	default:
   10970 		break;
   10971 	}
   10972 
   10973 	/* Keep the laser running on fiber adapters */
   10974 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   10975 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   10976 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10977 		reg |= CTRL_EXT_SWDPIN(3);
   10978 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10979 	}
   10980 
   10981 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   10982 #if 0	/* for the multicast packet */
   10983 	reg |= WUFC_MC;
   10984 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   10985 #endif
   10986 
   10987 	if (sc->sc_type == WM_T_PCH) {
   10988 		wm_enable_phy_wakeup(sc);
   10989 	} else {
   10990 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   10991 		CSR_WRITE(sc, WMREG_WUFC, reg);
   10992 	}
   10993 
   10994 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10995 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10996 		|| (sc->sc_type == WM_T_PCH2))
   10997 		    && (sc->sc_phytype == WMPHY_IGP_3))
   10998 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   10999 
   11000 	/* Request PME */
   11001 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11002 #if 0
   11003 	/* Disable WOL */
   11004 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11005 #else
   11006 	/* For WOL */
   11007 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11008 #endif
   11009 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11010 }
   11011 #endif /* WM_WOL */
   11012 
   11013 /* EEE */
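/*
 * EEE (IEEE 802.3az, Energy Efficient Ethernet) lets both link
 * partners put the link into a low-power idle state between packets.
 * On the i350 the IPCNFG register selects which speeds advertise EEE
 * and EEER enables the transmit/receive LPI paths, as set up below.
 */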
   11014 
   11015 static void
   11016 wm_set_eee_i350(struct wm_softc *sc)
   11017 {
   11018 	uint32_t ipcnfg, eeer;
   11019 
   11020 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11021 	eeer = CSR_READ(sc, WMREG_EEER);
   11022 
   11023 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11024 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11025 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11026 		    | EEER_LPI_FC);
   11027 	} else {
   11028 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11029 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11030 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11031 		    | EEER_LPI_FC);
   11032 	}
   11033 
   11034 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11035 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11036 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11037 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11038 }
   11039 
   11040 /*
   11041  * Workarounds (mainly PHY related).
   11042  * Basically, PHY's workarounds are in the PHY drivers.
   11043  */
   11044 
   11045 /* Work-around for 82566 Kumeran PCS lock loss */
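/*
 * If the PCS lock-loss diagnostic bit stays set, the code below resets
 * the PHY and rechecks, up to ten times; as a last resort it disables
 * gigabit operation (PHY_CTRL_GBE_DIS), since the lock loss only
 * matters for 1000BASE-T links.
 */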
   11046 static void
   11047 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11048 {
   11049 	int miistatus, active, i;
   11050 	int reg;
   11051 
   11052 	miistatus = sc->sc_mii.mii_media_status;
   11053 
    11054 	/* If the link is not up, do nothing */
    11055 	if ((miistatus & IFM_ACTIVE) == 0)
    11056 		return;
   11057 
   11058 	active = sc->sc_mii.mii_media_active;
   11059 
   11060 	/* Nothing to do if the link is other than 1Gbps */
   11061 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11062 		return;
   11063 
   11064 	for (i = 0; i < 10; i++) {
   11065 		/* read twice */
   11066 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11067 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
    11068 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
    11069 			goto out;	/* GOOD! No lock loss */
   11070 
   11071 		/* Reset the PHY */
   11072 		wm_gmii_reset(sc);
   11073 		delay(5*1000);
   11074 	}
   11075 
   11076 	/* Disable GigE link negotiation */
   11077 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11078 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11079 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11080 
   11081 	/*
   11082 	 * Call gig speed drop workaround on Gig disable before accessing
   11083 	 * any PHY registers.
   11084 	 */
   11085 	wm_gig_downshift_workaround_ich8lan(sc);
   11086 
   11087 out:
   11088 	return;
   11089 }
   11090 
   11091 /* WOL from S5 stops working */
   11092 static void
   11093 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11094 {
   11095 	uint16_t kmrn_reg;
   11096 
   11097 	/* Only for igp3 */
   11098 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11099 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11100 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11101 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11102 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11103 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11104 	}
   11105 }
   11106 
   11107 /*
   11108  * Workaround for pch's PHYs
   11109  * XXX should be moved to new PHY driver?
   11110  */
   11111 static void
   11112 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   11113 {
   11114 	if (sc->sc_phytype == WMPHY_82577)
   11115 		wm_set_mdio_slow_mode_hv(sc);
   11116 
   11117 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   11118 
   11119 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   11120 
   11121 	/* 82578 */
   11122 	if (sc->sc_phytype == WMPHY_82578) {
   11123 		/* PCH rev. < 3 */
   11124 		if (sc->sc_rev < 3) {
   11125 			/* XXX 6 bit shift? Why? Is it page2? */
   11126 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   11127 			    0x66c0);
   11128 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   11129 			    0xffff);
   11130 		}
   11131 
   11132 		/* XXX phy rev. < 2 */
   11133 	}
   11134 
   11135 	/* Select page 0 */
   11136 
   11137 	/* XXX acquire semaphore */
   11138 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   11139 	/* XXX release semaphore */
   11140 
    11141 	/*
    11142 	 * Configure the K1 Si workaround during PHY reset, assuming the
    11143 	 * link is up, so that K1 is disabled when the link runs at 1Gbps.
    11144 	 */
   11145 	wm_k1_gig_workaround_hv(sc, 1);
   11146 }
   11147 
   11148 static void
   11149 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   11150 {
   11151 
   11152 	wm_set_mdio_slow_mode_hv(sc);
   11153 }
   11154 
   11155 static void
   11156 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   11157 {
   11158 	int k1_enable = sc->sc_nvm_k1_enabled;
   11159 
   11160 	/* XXX acquire semaphore */
   11161 
   11162 	if (link) {
   11163 		k1_enable = 0;
   11164 
   11165 		/* Link stall fix for link up */
   11166 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   11167 	} else {
   11168 		/* Link stall fix for link down */
   11169 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   11170 	}
   11171 
   11172 	wm_configure_k1_ich8lan(sc, k1_enable);
   11173 
   11174 	/* XXX release semaphore */
   11175 }
   11176 
   11177 static void
   11178 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   11179 {
   11180 	uint32_t reg;
   11181 
   11182 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   11183 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   11184 	    reg | HV_KMRN_MDIO_SLOW);
   11185 }
   11186 
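/*
 * K1 is a power-saving state of the Kumeran MAC/PHY interconnect (an
 * assumption based on the e1000 family documentation).  The sequence
 * below flips the K1 enable bit over KMRN and then briefly forces the
 * MAC speed (CTRL_FRCSPD with the CTRL_EXT_SPD_BYPS bypass) so that
 * the new setting takes effect.
 */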
   11187 static void
   11188 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   11189 {
   11190 	uint32_t ctrl, ctrl_ext, tmp;
   11191 	uint16_t kmrn_reg;
   11192 
   11193 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   11194 
   11195 	if (k1_enable)
   11196 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   11197 	else
   11198 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   11199 
   11200 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   11201 
   11202 	delay(20);
   11203 
   11204 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11205 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11206 
   11207 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   11208 	tmp |= CTRL_FRCSPD;
   11209 
   11210 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   11211 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   11212 	CSR_WRITE_FLUSH(sc);
   11213 	delay(20);
   11214 
   11215 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   11216 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11217 	CSR_WRITE_FLUSH(sc);
   11218 	delay(20);
   11219 }
   11220 
    11221 /* Special case - the 82575 needs manual init ... */
   11222 static void
   11223 wm_reset_init_script_82575(struct wm_softc *sc)
   11224 {
    11225 	/*
    11226 	 * Remark: this is untested code - we have no board without an EEPROM.
    11227 	 * The setup is the same as in the FreeBSD driver for the i82575.
    11228 	 */
   11229 
   11230 	/* SerDes configuration via SERDESCTRL */
   11231 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   11232 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   11233 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   11234 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   11235 
   11236 	/* CCM configuration via CCMCTL register */
   11237 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   11238 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   11239 
   11240 	/* PCIe lanes configuration */
   11241 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   11242 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   11243 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   11244 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   11245 
   11246 	/* PCIe PLL Configuration */
   11247 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   11248 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   11249 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   11250 }
   11251 
   11252 static void
   11253 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   11254 {
   11255 	uint32_t reg;
   11256 	uint16_t nvmword;
   11257 	int rv;
   11258 
   11259 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11260 		return;
   11261 
   11262 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   11263 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   11264 	if (rv != 0) {
   11265 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   11266 		    __func__);
   11267 		return;
   11268 	}
   11269 
   11270 	reg = CSR_READ(sc, WMREG_MDICNFG);
   11271 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   11272 		reg |= MDICNFG_DEST;
   11273 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   11274 		reg |= MDICNFG_COM_MDIO;
   11275 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   11276 }
   11277 
   11278 /*
   11279  * I210 Errata 25 and I211 Errata 10
   11280  * Slow System Clock.
   11281  */
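/*
 * The workaround below reads the PHY PLL frequency register and, if
 * the PLL came up unconfigured, rewrites the iNVM autoload word with
 * INVM_PLL_WO_VAL, bounces the device through D3hot and back to D0 so
 * that the PLL relocks, and finally restores the original word and
 * the saved WUC and MDICNFG registers.
 */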
   11282 static void
   11283 wm_pll_workaround_i210(struct wm_softc *sc)
   11284 {
   11285 	uint32_t mdicnfg, wuc;
   11286 	uint32_t reg;
   11287 	pcireg_t pcireg;
   11288 	uint32_t pmreg;
   11289 	uint16_t nvmword, tmp_nvmword;
   11290 	int phyval;
   11291 	bool wa_done = false;
   11292 	int i;
   11293 
   11294 	/* Save WUC and MDICNFG registers */
   11295 	wuc = CSR_READ(sc, WMREG_WUC);
   11296 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   11297 
   11298 	reg = mdicnfg & ~MDICNFG_DEST;
   11299 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   11300 
   11301 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   11302 		nvmword = INVM_DEFAULT_AL;
   11303 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   11304 
   11305 	/* Get Power Management cap offset */
   11306 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11307 		&pmreg, NULL) == 0)
   11308 		return;
   11309 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   11310 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   11311 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   11312 
   11313 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   11314 			break; /* OK */
   11315 		}
   11316 
   11317 		wa_done = true;
   11318 		/* Directly reset the internal PHY */
   11319 		reg = CSR_READ(sc, WMREG_CTRL);
   11320 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   11321 
   11322 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11323 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   11324 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11325 
   11326 		CSR_WRITE(sc, WMREG_WUC, 0);
   11327 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   11328 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   11329 
   11330 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   11331 		    pmreg + PCI_PMCSR);
   11332 		pcireg |= PCI_PMCSR_STATE_D3;
   11333 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   11334 		    pmreg + PCI_PMCSR, pcireg);
   11335 		delay(1000);
   11336 		pcireg &= ~PCI_PMCSR_STATE_D3;
   11337 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   11338 		    pmreg + PCI_PMCSR, pcireg);
   11339 
   11340 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   11341 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   11342 
   11343 		/* Restore WUC register */
   11344 		CSR_WRITE(sc, WMREG_WUC, wuc);
   11345 	}
   11346 
   11347 	/* Restore MDICNFG setting */
   11348 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   11349 	if (wa_done)
   11350 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   11351 }
   11352