/*	$NetBSD: if_wm.c,v 1.362 2015/10/13 08:27:11 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- Multi queue
 *	- Image Unique ID
 *	- LPLU other than PCH*
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.362 2015/10/13 08:27:11 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

#ifdef __HAVE_PCI_MSI_MSIX
#define WM_MSI_MSIX	1 /* Enable by default */
#endif

/*
 * This device driver splits interrupts into Tx, Rx and link state.
 * The MSI-X vector indexes are as follows.
 */
#define WM_MSIX_NINTR		3
#define WM_MSIX_TXINTR_IDX	0
#define WM_MSIX_RXINTR_IDX	1
#define WM_MSIX_LINKINTR_IDX	2
#define WM_MAX_NINTR		WM_MSIX_NINTR

/*
 * This device driver sets the affinity of each interrupt as below
 * (round-robin).  If the number of CPUs is less than the number of
 * interrupts, this driver uses the same CPU for multiple interrupts.
 */
#define WM_MSIX_TXINTR_CPUID	0
#define WM_MSIX_RXINTR_CPUID	1
#define WM_MSIX_LINKINTR_CPUID	2
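
/*
 * Illustrative note (added, not in the original): with the default
 * mapping above, a machine with three or more CPUs gives the Tx, Rx
 * and link interrupts a dedicated CPU each (0, 1 and 2); with fewer
 * CPUs, some of the three interrupts end up sharing a CPU.
 */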

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
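
/*
 * Illustrative example (added): because WM_NTXDESC(txq) is a power of
 * two, the index macros above wrap with a mask rather than a modulo;
 * e.g. with txq_ndesc == 4096, WM_NEXTTX(txq, 4095) == (4096 & 4095)
 * == 0, taking the ring back to descriptor 0.
 */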

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
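
/*
 * Example (added): WM_NEXTRX(255) == 0 and WM_PREVRX(0) == 255; the
 * power-of-two mask makes the 256-entry ring wrap in both directions.
 */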

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
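
/*
 * Example (added): WM_CDTXOFF(n) is the byte offset of Tx descriptor n
 * within the control data area, so descriptor 2 lives at byte offset
 * 2 * sizeof(wiseman_txdesc_t); WM_CDRXOFF() is the Rx equivalent.
 */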

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
	size_t txq_desc_size;		/* control data size */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/* XXX which event counter is required? */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx descriptor/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_ntxqueues;
	struct wm_txqueue *sc_txq;

	int sc_nrxqueues;
	struct wm_rxqueue *sc_rxq;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
};

#define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
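
/*
 * Sketch of how the two macros above cooperate (added comment): after
 * WM_RXCHAIN_RESET(), rxq_tailp points at rxq_head, so the first
 * WM_RXCHAIN_LINK() stores its mbuf into rxq_head; every later link
 * writes through rxq_tailp into the previous mbuf's m_next, appending
 * in O(1) without walking the chain.
 */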

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
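
/*
 * Note (added): CSR_WRITE_FLUSH() reads STATUS purely for its side
 * effect; the read cannot complete until any posted CSR writes ahead
 * of it have reached the device, so it acts as a write flush.
 */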

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
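
/*
 * Example (added): with a 64-bit bus_addr_t these macros split a DMA
 * address into the two 32-bit halves the descriptors expect, e.g.
 * 0x0000000123456789 -> LO 0x23456789, HI 0x00000001; with a 32-bit
 * bus_addr_t the high half is always 0.
 */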

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
#ifdef WM_MSI_MSIX
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);
#endif

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
    717 static int	wm_get_swsm_semaphore(struct wm_softc *);
    718 static void	wm_put_swsm_semaphore(struct wm_softc *);
    719 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    720 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    721 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    722 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    723 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    724 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    725 
    726 /*
    727  * Management mode and power management related subroutines.
    728  * BMC, AMT, suspend/resume and EEE.
    729  */
    730 static int	wm_check_mng_mode(struct wm_softc *);
    731 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    732 static int	wm_check_mng_mode_82574(struct wm_softc *);
    733 static int	wm_check_mng_mode_generic(struct wm_softc *);
    734 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    735 static int	wm_check_reset_block(struct wm_softc *);
    736 static void	wm_get_hw_control(struct wm_softc *);
    737 static void	wm_release_hw_control(struct wm_softc *);
    738 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
    739 static void	wm_smbustopci(struct wm_softc *);
    740 static void	wm_init_manageability(struct wm_softc *);
    741 static void	wm_release_manageability(struct wm_softc *);
    742 static void	wm_get_wakeup(struct wm_softc *);
    743 #ifdef WM_WOL
    744 static void	wm_enable_phy_wakeup(struct wm_softc *);
    745 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    746 static void	wm_enable_wakeup(struct wm_softc *);
    747 #endif
    748 /* EEE */
    749 static void	wm_set_eee_i350(struct wm_softc *);
    750 
    751 /*
    752  * Workarounds (mainly PHY related).
    753  * Basically, PHY's workarounds are in the PHY drivers.
    754  */
    755 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    756 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    757 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    758 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    759 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    760 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    761 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    762 static void	wm_reset_init_script_82575(struct wm_softc *);
    763 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    764 static void	wm_pll_workaround_i210(struct wm_softc *);
    765 
    766 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    767     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    768 
    769 /*
    770  * Devices supported by this driver.
    771  */
    772 static const struct wm_product {
    773 	pci_vendor_id_t		wmp_vendor;
    774 	pci_product_id_t	wmp_product;
    775 	const char		*wmp_name;
    776 	wm_chip_type		wmp_type;
    777 	uint32_t		wmp_flags;
    778 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    779 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    780 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    781 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    782 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    783 } wm_products[] = {
    784 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    785 	  "Intel i82542 1000BASE-X Ethernet",
    786 	  WM_T_82542_2_1,	WMP_F_FIBER },
    787 
    788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    789 	  "Intel i82543GC 1000BASE-X Ethernet",
    790 	  WM_T_82543,		WMP_F_FIBER },
    791 
    792 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    793 	  "Intel i82543GC 1000BASE-T Ethernet",
    794 	  WM_T_82543,		WMP_F_COPPER },
    795 
    796 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    797 	  "Intel i82544EI 1000BASE-T Ethernet",
    798 	  WM_T_82544,		WMP_F_COPPER },
    799 
    800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    801 	  "Intel i82544EI 1000BASE-X Ethernet",
    802 	  WM_T_82544,		WMP_F_FIBER },
    803 
    804 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    805 	  "Intel i82544GC 1000BASE-T Ethernet",
    806 	  WM_T_82544,		WMP_F_COPPER },
    807 
    808 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    809 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    810 	  WM_T_82544,		WMP_F_COPPER },
    811 
    812 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    813 	  "Intel i82540EM 1000BASE-T Ethernet",
    814 	  WM_T_82540,		WMP_F_COPPER },
    815 
    816 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    817 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    818 	  WM_T_82540,		WMP_F_COPPER },
    819 
    820 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    821 	  "Intel i82540EP 1000BASE-T Ethernet",
    822 	  WM_T_82540,		WMP_F_COPPER },
    823 
    824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    825 	  "Intel i82540EP 1000BASE-T Ethernet",
    826 	  WM_T_82540,		WMP_F_COPPER },
    827 
    828 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    829 	  "Intel i82540EP 1000BASE-T Ethernet",
    830 	  WM_T_82540,		WMP_F_COPPER },
    831 
    832 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    833 	  "Intel i82545EM 1000BASE-T Ethernet",
    834 	  WM_T_82545,		WMP_F_COPPER },
    835 
    836 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    837 	  "Intel i82545GM 1000BASE-T Ethernet",
    838 	  WM_T_82545_3,		WMP_F_COPPER },
    839 
    840 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    841 	  "Intel i82545GM 1000BASE-X Ethernet",
    842 	  WM_T_82545_3,		WMP_F_FIBER },
    843 
    844 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    845 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    846 	  WM_T_82545_3,		WMP_F_SERDES },
    847 
    848 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    849 	  "Intel i82546EB 1000BASE-T Ethernet",
    850 	  WM_T_82546,		WMP_F_COPPER },
    851 
    852 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    853 	  "Intel i82546EB 1000BASE-T Ethernet",
    854 	  WM_T_82546,		WMP_F_COPPER },
    855 
    856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    857 	  "Intel i82545EM 1000BASE-X Ethernet",
    858 	  WM_T_82545,		WMP_F_FIBER },
    859 
    860 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    861 	  "Intel i82546EB 1000BASE-X Ethernet",
    862 	  WM_T_82546,		WMP_F_FIBER },
    863 
    864 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    865 	  "Intel i82546GB 1000BASE-T Ethernet",
    866 	  WM_T_82546_3,		WMP_F_COPPER },
    867 
    868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    869 	  "Intel i82546GB 1000BASE-X Ethernet",
    870 	  WM_T_82546_3,		WMP_F_FIBER },
    871 
    872 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    873 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    874 	  WM_T_82546_3,		WMP_F_SERDES },
    875 
    876 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    877 	  "i82546GB quad-port Gigabit Ethernet",
    878 	  WM_T_82546_3,		WMP_F_COPPER },
    879 
    880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    881 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    882 	  WM_T_82546_3,		WMP_F_COPPER },
    883 
    884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    885 	  "Intel PRO/1000MT (82546GB)",
    886 	  WM_T_82546_3,		WMP_F_COPPER },
    887 
    888 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    889 	  "Intel i82541EI 1000BASE-T Ethernet",
    890 	  WM_T_82541,		WMP_F_COPPER },
    891 
    892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    893 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    894 	  WM_T_82541,		WMP_F_COPPER },
    895 
    896 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    897 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    898 	  WM_T_82541,		WMP_F_COPPER },
    899 
    900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    901 	  "Intel i82541ER 1000BASE-T Ethernet",
    902 	  WM_T_82541_2,		WMP_F_COPPER },
    903 
    904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    905 	  "Intel i82541GI 1000BASE-T Ethernet",
    906 	  WM_T_82541_2,		WMP_F_COPPER },
    907 
    908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    909 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    910 	  WM_T_82541_2,		WMP_F_COPPER },
    911 
    912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    913 	  "Intel i82541PI 1000BASE-T Ethernet",
    914 	  WM_T_82541_2,		WMP_F_COPPER },
    915 
    916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    917 	  "Intel i82547EI 1000BASE-T Ethernet",
    918 	  WM_T_82547,		WMP_F_COPPER },
    919 
    920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    921 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    922 	  WM_T_82547,		WMP_F_COPPER },
    923 
    924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    925 	  "Intel i82547GI 1000BASE-T Ethernet",
    926 	  WM_T_82547_2,		WMP_F_COPPER },
    927 
    928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    929 	  "Intel PRO/1000 PT (82571EB)",
    930 	  WM_T_82571,		WMP_F_COPPER },
    931 
    932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    933 	  "Intel PRO/1000 PF (82571EB)",
    934 	  WM_T_82571,		WMP_F_FIBER },
    935 
    936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    937 	  "Intel PRO/1000 PB (82571EB)",
    938 	  WM_T_82571,		WMP_F_SERDES },
    939 
    940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    941 	  "Intel PRO/1000 QT (82571EB)",
    942 	  WM_T_82571,		WMP_F_COPPER },
    943 
    944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    945 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    946 	  WM_T_82571,		WMP_F_COPPER, },
    947 
    948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
    949 	  "Intel Gigabit PT Quad Port Server ExpressModule",
    950 	  WM_T_82571,		WMP_F_COPPER, },
    951 
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
    953 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
    954 	  WM_T_82571,		WMP_F_SERDES, },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
    957 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
    958 	  WM_T_82571,		WMP_F_SERDES, },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
    961 	  "Intel 82571EB Quad 1000baseX Ethernet",
    962 	  WM_T_82571,		WMP_F_FIBER, },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    965 	  "Intel i82572EI 1000baseT Ethernet",
    966 	  WM_T_82572,		WMP_F_COPPER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    969 	  "Intel i82572EI 1000baseX Ethernet",
    970 	  WM_T_82572,		WMP_F_FIBER },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    973 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    974 	  WM_T_82572,		WMP_F_SERDES },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    977 	  "Intel i82572EI 1000baseT Ethernet",
    978 	  WM_T_82572,		WMP_F_COPPER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    981 	  "Intel i82573E",
    982 	  WM_T_82573,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    985 	  "Intel i82573E IAMT",
    986 	  WM_T_82573,		WMP_F_COPPER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
    989 	  "Intel i82573L Gigabit Ethernet",
    990 	  WM_T_82573,		WMP_F_COPPER },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
    993 	  "Intel i82574L",
    994 	  WM_T_82574,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
    997 	  "Intel i82574L",
    998 	  WM_T_82574,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1001 	  "Intel i82583V",
   1002 	  WM_T_82583,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1005 	  "i80003 dual 1000baseT Ethernet",
   1006 	  WM_T_80003,		WMP_F_COPPER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1009 	  "i80003 dual 1000baseX Ethernet",
   1010 	  WM_T_80003,		WMP_F_COPPER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1013 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1014 	  WM_T_80003,		WMP_F_SERDES },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1017 	  "Intel i80003 1000baseT Ethernet",
   1018 	  WM_T_80003,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1021 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1022 	  WM_T_80003,		WMP_F_SERDES },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1025 	  "Intel i82801H (M_AMT) LAN Controller",
   1026 	  WM_T_ICH8,		WMP_F_COPPER },
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1028 	  "Intel i82801H (AMT) LAN Controller",
   1029 	  WM_T_ICH8,		WMP_F_COPPER },
   1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1031 	  "Intel i82801H LAN Controller",
   1032 	  WM_T_ICH8,		WMP_F_COPPER },
   1033 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1034 	  "Intel i82801H (IFE) LAN Controller",
   1035 	  WM_T_ICH8,		WMP_F_COPPER },
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1037 	  "Intel i82801H (M) LAN Controller",
   1038 	  WM_T_ICH8,		WMP_F_COPPER },
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1040 	  "Intel i82801H IFE (GT) LAN Controller",
   1041 	  WM_T_ICH8,		WMP_F_COPPER },
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1043 	  "Intel i82801H IFE (G) LAN Controller",
   1044 	  WM_T_ICH8,		WMP_F_COPPER },
   1045 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1046 	  "82801I (AMT) LAN Controller",
   1047 	  WM_T_ICH9,		WMP_F_COPPER },
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1049 	  "82801I LAN Controller",
   1050 	  WM_T_ICH9,		WMP_F_COPPER },
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1052 	  "82801I (G) LAN Controller",
   1053 	  WM_T_ICH9,		WMP_F_COPPER },
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1055 	  "82801I (GT) LAN Controller",
   1056 	  WM_T_ICH9,		WMP_F_COPPER },
   1057 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1058 	  "82801I (C) LAN Controller",
   1059 	  WM_T_ICH9,		WMP_F_COPPER },
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1061 	  "82801I mobile LAN Controller",
   1062 	  WM_T_ICH9,		WMP_F_COPPER },
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1064 	  "82801I mobile (V) LAN Controller",
   1065 	  WM_T_ICH9,		WMP_F_COPPER },
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1067 	  "82801I mobile (AMT) LAN Controller",
   1068 	  WM_T_ICH9,		WMP_F_COPPER },
   1069 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1070 	  "82567LM-4 LAN Controller",
   1071 	  WM_T_ICH9,		WMP_F_COPPER },
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1073 	  "82567V-3 LAN Controller",
   1074 	  WM_T_ICH9,		WMP_F_COPPER },
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1076 	  "82567LM-2 LAN Controller",
   1077 	  WM_T_ICH10,		WMP_F_COPPER },
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1079 	  "82567LF-2 LAN Controller",
   1080 	  WM_T_ICH10,		WMP_F_COPPER },
   1081 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1082 	  "82567LM-3 LAN Controller",
   1083 	  WM_T_ICH10,		WMP_F_COPPER },
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1085 	  "82567LF-3 LAN Controller",
   1086 	  WM_T_ICH10,		WMP_F_COPPER },
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1088 	  "82567V-2 LAN Controller",
   1089 	  WM_T_ICH10,		WMP_F_COPPER },
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1091 	  "82567V-3? LAN Controller",
   1092 	  WM_T_ICH10,		WMP_F_COPPER },
   1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1094 	  "HANKSVILLE LAN Controller",
   1095 	  WM_T_ICH10,		WMP_F_COPPER },
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1097 	  "PCH LAN (82577LM) Controller",
   1098 	  WM_T_PCH,		WMP_F_COPPER },
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1100 	  "PCH LAN (82577LC) Controller",
   1101 	  WM_T_PCH,		WMP_F_COPPER },
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1103 	  "PCH LAN (82578DM) Controller",
   1104 	  WM_T_PCH,		WMP_F_COPPER },
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1106 	  "PCH LAN (82578DC) Controller",
   1107 	  WM_T_PCH,		WMP_F_COPPER },
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1109 	  "PCH2 LAN (82579LM) Controller",
   1110 	  WM_T_PCH2,		WMP_F_COPPER },
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1112 	  "PCH2 LAN (82579V) Controller",
   1113 	  WM_T_PCH2,		WMP_F_COPPER },
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1115 	  "82575EB dual-1000baseT Ethernet",
   1116 	  WM_T_82575,		WMP_F_COPPER },
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1118 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1119 	  WM_T_82575,		WMP_F_SERDES },
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1121 	  "82575GB quad-1000baseT Ethernet",
   1122 	  WM_T_82575,		WMP_F_COPPER },
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1124 	  "82575GB quad-1000baseT Ethernet (PM)",
   1125 	  WM_T_82575,		WMP_F_COPPER },
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1127 	  "82576 1000BaseT Ethernet",
   1128 	  WM_T_82576,		WMP_F_COPPER },
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1130 	  "82576 1000BaseX Ethernet",
   1131 	  WM_T_82576,		WMP_F_FIBER },
   1132 
   1133 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1134 	  "82576 gigabit Ethernet (SERDES)",
   1135 	  WM_T_82576,		WMP_F_SERDES },
   1136 
   1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1138 	  "82576 quad-1000BaseT Ethernet",
   1139 	  WM_T_82576,		WMP_F_COPPER },
   1140 
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1142 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1143 	  WM_T_82576,		WMP_F_COPPER },
   1144 
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1146 	  "82576 gigabit Ethernet",
   1147 	  WM_T_82576,		WMP_F_COPPER },
   1148 
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1150 	  "82576 gigabit Ethernet (SERDES)",
   1151 	  WM_T_82576,		WMP_F_SERDES },
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1153 	  "82576 quad-gigabit Ethernet (SERDES)",
   1154 	  WM_T_82576,		WMP_F_SERDES },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1157 	  "82580 1000BaseT Ethernet",
   1158 	  WM_T_82580,		WMP_F_COPPER },
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1160 	  "82580 1000BaseX Ethernet",
   1161 	  WM_T_82580,		WMP_F_FIBER },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1164 	  "82580 1000BaseT Ethernet (SERDES)",
   1165 	  WM_T_82580,		WMP_F_SERDES },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1168 	  "82580 gigabit Ethernet (SGMII)",
   1169 	  WM_T_82580,		WMP_F_COPPER },
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1171 	  "82580 dual-1000BaseT Ethernet",
   1172 	  WM_T_82580,		WMP_F_COPPER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1175 	  "82580 quad-1000BaseX Ethernet",
   1176 	  WM_T_82580,		WMP_F_FIBER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1179 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1180 	  WM_T_82580,		WMP_F_COPPER },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1183 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1184 	  WM_T_82580,		WMP_F_SERDES },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1187 	  "DH89XXCC 1000BASE-KX Ethernet",
   1188 	  WM_T_82580,		WMP_F_SERDES },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1191 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1192 	  WM_T_82580,		WMP_F_SERDES },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1195 	  "I350 Gigabit Network Connection",
   1196 	  WM_T_I350,		WMP_F_COPPER },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1199 	  "I350 Gigabit Fiber Network Connection",
   1200 	  WM_T_I350,		WMP_F_FIBER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1203 	  "I350 Gigabit Backplane Connection",
   1204 	  WM_T_I350,		WMP_F_SERDES },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1207 	  "I350 Quad Port Gigabit Ethernet",
   1208 	  WM_T_I350,		WMP_F_SERDES },
   1209 
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1211 	  "I350 Gigabit Connection",
   1212 	  WM_T_I350,		WMP_F_COPPER },
   1213 
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1215 	  "I354 Gigabit Ethernet (KX)",
   1216 	  WM_T_I354,		WMP_F_SERDES },
   1217 
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1219 	  "I354 Gigabit Ethernet (SGMII)",
   1220 	  WM_T_I354,		WMP_F_COPPER },
   1221 
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1223 	  "I354 Gigabit Ethernet (2.5G)",
   1224 	  WM_T_I354,		WMP_F_COPPER },
   1225 
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1227 	  "I210-T1 Ethernet Server Adapter",
   1228 	  WM_T_I210,		WMP_F_COPPER },
   1229 
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1231 	  "I210 Ethernet (Copper OEM)",
   1232 	  WM_T_I210,		WMP_F_COPPER },
   1233 
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1235 	  "I210 Ethernet (Copper IT)",
   1236 	  WM_T_I210,		WMP_F_COPPER },
   1237 
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1239 	  "I210 Ethernet (FLASH less)",
   1240 	  WM_T_I210,		WMP_F_COPPER },
   1241 
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1243 	  "I210 Gigabit Ethernet (Fiber)",
   1244 	  WM_T_I210,		WMP_F_FIBER },
   1245 
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1247 	  "I210 Gigabit Ethernet (SERDES)",
   1248 	  WM_T_I210,		WMP_F_SERDES },
   1249 
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1251 	  "I210 Gigabit Ethernet (FLASH less)",
   1252 	  WM_T_I210,		WMP_F_SERDES },
   1253 
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1255 	  "I210 Gigabit Ethernet (SGMII)",
   1256 	  WM_T_I210,		WMP_F_COPPER },
   1257 
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1259 	  "I211 Ethernet (COPPER)",
   1260 	  WM_T_I211,		WMP_F_COPPER },
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1262 	  "I217 V Ethernet Connection",
   1263 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1265 	  "I217 LM Ethernet Connection",
   1266 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1268 	  "I218 V Ethernet Connection",
   1269 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1271 	  "I218 V Ethernet Connection",
   1272 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1274 	  "I218 V Ethernet Connection",
   1275 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1277 	  "I218 LM Ethernet Connection",
   1278 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1280 	  "I218 LM Ethernet Connection",
   1281 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1283 	  "I218 LM Ethernet Connection",
   1284 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1285 	{ 0,			0,
   1286 	  NULL,
   1287 	  0,			0 },
   1288 };
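         /*
          * The all-zero entry above (NULL wmp_name) terminates table
          * scans such as the one in wm_lookup().
          */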
   1289 
   1290 #ifdef WM_EVENT_COUNTERS
   1291 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1292 #endif /* WM_EVENT_COUNTERS */
   1293 
   1294 
   1295 /*
   1296  * Register read/write functions.
   1297  * Other than CSR_{READ|WRITE}().
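          *
          * The I/O-mapped helpers below use the chip's indirect access
          * window: the register offset is written at I/O BAR offset 0
          * (IOADDR) and the value is read or written at offset 4
          * (IODATA).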
   1298  */
   1299 
   1300 #if 0 /* Not currently used */
   1301 static inline uint32_t
   1302 wm_io_read(struct wm_softc *sc, int reg)
   1303 {
   1304 
   1305 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1306 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1307 }
   1308 #endif
   1309 
   1310 static inline void
   1311 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1312 {
   1313 
   1314 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1315 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1316 }
   1317 
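         /*
          * wm_82575_write_8bit_ctlr_reg:
          *
          *	Write an 8-bit value into an indexed 82575 controller
          *	register, then poll (up to SCTL_CTL_POLL_TIMEOUT
          *	iterations of 5us each) until the hardware sets the
          *	READY bit, warning if it never does.
          */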
   1318 static inline void
   1319 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1320     uint32_t data)
   1321 {
   1322 	uint32_t regval;
   1323 	int i;
   1324 
   1325 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1326 
   1327 	CSR_WRITE(sc, reg, regval);
   1328 
   1329 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1330 		delay(5);
   1331 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1332 			break;
   1333 	}
   1334 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1335 		aprint_error("%s: WARNING:"
   1336 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1337 		    device_xname(sc->sc_dev), reg);
   1338 	}
   1339 }
   1340 
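         /*
          * wm_set_dma_addr:
          *
          *	Store a bus address into a descriptor's low/high words,
          *	little-endian.  Illustrative example: with a 64-bit
          *	bus_addr_t and v == 0x0000001234567890, wa_low becomes
          *	htole32(0x34567890) and wa_high htole32(0x00000012).
          */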
   1341 static inline void
   1342 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1343 {
   1344 	wa->wa_low = htole32(v & 0xffffffffU);
   1345 	if (sizeof(bus_addr_t) == 8)
   1346 		wa->wa_high = htole32((uint64_t) v >> 32);
   1347 	else
   1348 		wa->wa_high = 0;
   1349 }
   1350 
   1351 /*
   1352  * Descriptor sync/init functions.
   1353  */
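         /*
          * wm_cdtxsync:
          *
          *	Sync "num" Tx descriptors starting at "start".  When the
          *	range wraps past the end of the ring, the sync is split
          *	in two.  Illustrative example: on a 256-descriptor ring,
          *	start == 250 with num == 10 syncs descriptors 250..255
          *	and then 0..3.
          */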
   1354 static inline void
   1355 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1356 {
   1357 	struct wm_softc *sc = txq->txq_sc;
   1358 
   1359 	/* If it will wrap around, sync to the end of the ring. */
   1360 	if ((start + num) > WM_NTXDESC(txq)) {
   1361 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1362 		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
   1363 		    (WM_NTXDESC(txq) - start), ops);
   1364 		num -= (WM_NTXDESC(txq) - start);
   1365 		start = 0;
   1366 	}
   1367 
   1368 	/* Now sync whatever is left. */
   1369 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1370 	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
   1371 }
   1372 
   1373 static inline void
   1374 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1375 {
   1376 	struct wm_softc *sc = rxq->rxq_sc;
   1377 
   1378 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1379 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1380 }
   1381 
   1382 static inline void
   1383 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1384 {
   1385 	struct wm_softc *sc = rxq->rxq_sc;
   1386 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1387 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1388 	struct mbuf *m = rxs->rxs_mbuf;
   1389 
   1390 	/*
   1391 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1392 	 * so that the payload after the Ethernet header is aligned
   1393 	 * to a 4-byte boundary.
    1394 	 *
   1395 	 * XXX BRAINDAMAGE ALERT!
   1396 	 * The stupid chip uses the same size for every buffer, which
   1397 	 * is set in the Receive Control register.  We are using the 2K
   1398 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1399 	 * reason, we can't "scoot" packets longer than the standard
   1400 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1401 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1402 	 * the upper layer copy the headers.
   1403 	 */
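         	/*
         	 * E.g. with sc_align_tweak == 2: 2 + the 14-byte Ethernet
         	 * header == 16, so the IP header starts 4-byte aligned.
         	 */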
   1404 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1405 
   1406 	wm_set_dma_addr(&rxd->wrx_addr,
   1407 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1408 	rxd->wrx_len = 0;
   1409 	rxd->wrx_cksum = 0;
   1410 	rxd->wrx_status = 0;
   1411 	rxd->wrx_errors = 0;
   1412 	rxd->wrx_special = 0;
   1413 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1414 
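         	/* Advancing the tail (RDT) hands the descriptor to the chip. */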
   1415 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1416 }
   1417 
   1418 /*
   1419  * Device driver interface functions and commonly used functions.
   1420  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1421  */
   1422 
   1423 /* Lookup supported device table */
   1424 static const struct wm_product *
   1425 wm_lookup(const struct pci_attach_args *pa)
   1426 {
   1427 	const struct wm_product *wmp;
   1428 
   1429 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1430 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1431 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1432 			return wmp;
   1433 	}
   1434 	return NULL;
   1435 }
   1436 
   1437 /* The match function (ca_match) */
   1438 static int
   1439 wm_match(device_t parent, cfdata_t cf, void *aux)
   1440 {
   1441 	struct pci_attach_args *pa = aux;
   1442 
   1443 	if (wm_lookup(pa) != NULL)
   1444 		return 1;
   1445 
   1446 	return 0;
   1447 }
   1448 
   1449 /* The attach function (ca_attach) */
   1450 static void
   1451 wm_attach(device_t parent, device_t self, void *aux)
   1452 {
   1453 	struct wm_softc *sc = device_private(self);
   1454 	struct pci_attach_args *pa = aux;
   1455 	prop_dictionary_t dict;
   1456 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1457 	pci_chipset_tag_t pc = pa->pa_pc;
   1458 #ifndef WM_MSI_MSIX
   1459 	pci_intr_handle_t ih;
   1460 	const char *intrstr = NULL;
   1461 	char intrbuf[PCI_INTRSTR_LEN];
   1462 #else
   1463 	int counts[PCI_INTR_TYPE_SIZE];
   1464 	pci_intr_type_t max_type;
   1465 #endif
   1466 	const char *eetype, *xname;
   1467 	bus_space_tag_t memt;
   1468 	bus_space_handle_t memh;
   1469 	bus_size_t memsize;
   1470 	int memh_valid;
   1471 	int i, error;
   1472 	const struct wm_product *wmp;
   1473 	prop_data_t ea;
   1474 	prop_number_t pn;
   1475 	uint8_t enaddr[ETHER_ADDR_LEN];
   1476 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1477 	pcireg_t preg, memtype;
   1478 	uint16_t eeprom_data, apme_mask;
   1479 	bool force_clear_smbi;
   1480 	uint32_t link_mode;
   1481 	uint32_t reg;
   1482 
   1483 	sc->sc_dev = self;
   1484 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1485 	sc->sc_stopping = false;
   1486 
   1487 	wmp = wm_lookup(pa);
   1488 #ifdef DIAGNOSTIC
   1489 	if (wmp == NULL) {
   1490 		printf("\n");
   1491 		panic("wm_attach: impossible");
   1492 	}
   1493 #endif
   1494 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1495 
   1496 	sc->sc_pc = pa->pa_pc;
   1497 	sc->sc_pcitag = pa->pa_tag;
   1498 
   1499 	if (pci_dma64_available(pa))
   1500 		sc->sc_dmat = pa->pa_dmat64;
   1501 	else
   1502 		sc->sc_dmat = pa->pa_dmat;
   1503 
   1504 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1505 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1506 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1507 
   1508 	sc->sc_type = wmp->wmp_type;
   1509 	if (sc->sc_type < WM_T_82543) {
   1510 		if (sc->sc_rev < 2) {
   1511 			aprint_error_dev(sc->sc_dev,
   1512 			    "i82542 must be at least rev. 2\n");
   1513 			return;
   1514 		}
   1515 		if (sc->sc_rev < 3)
   1516 			sc->sc_type = WM_T_82542_2_0;
   1517 	}
   1518 
   1519 	/*
   1520 	 * Disable MSI for Errata:
   1521 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1522 	 *
   1523 	 *  82544: Errata 25
   1524 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1525 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1526 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1527 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1528 	 *
   1529 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1530 	 *
   1531 	 *  82571 & 82572: Errata 63
   1532 	 */
   1533 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1534 	    || (sc->sc_type == WM_T_82572))
   1535 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1536 
   1537 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1538 	    || (sc->sc_type == WM_T_82580)
   1539 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1540 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1541 		sc->sc_flags |= WM_F_NEWQUEUE;
   1542 
   1543 	/* Set device properties (mactype) */
   1544 	dict = device_properties(sc->sc_dev);
   1545 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1546 
   1547 	/*
    1548 	 * Map the device.  All devices support memory-mapped access,
   1549 	 * and it is really required for normal operation.
   1550 	 */
   1551 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1552 	switch (memtype) {
   1553 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1554 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1555 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1556 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1557 		break;
   1558 	default:
   1559 		memh_valid = 0;
   1560 		break;
   1561 	}
   1562 
   1563 	if (memh_valid) {
   1564 		sc->sc_st = memt;
   1565 		sc->sc_sh = memh;
   1566 		sc->sc_ss = memsize;
   1567 	} else {
   1568 		aprint_error_dev(sc->sc_dev,
   1569 		    "unable to map device registers\n");
   1570 		return;
   1571 	}
   1572 
   1573 	/*
   1574 	 * In addition, i82544 and later support I/O mapped indirect
   1575 	 * register access.  It is not desirable (nor supported in
   1576 	 * this driver) to use it for normal operation, though it is
   1577 	 * required to work around bugs in some chip versions.
   1578 	 */
   1579 	if (sc->sc_type >= WM_T_82544) {
   1580 		/* First we have to find the I/O BAR. */
   1581 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1582 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1583 			if (memtype == PCI_MAPREG_TYPE_IO)
   1584 				break;
   1585 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1586 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1587 				i += 4;	/* skip high bits, too */
   1588 		}
   1589 		if (i < PCI_MAPREG_END) {
   1590 			/*
    1591 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1592 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1593 			 * That's not a problem, because those newer chips
    1594 			 * don't have this bug.
    1595 			 *
    1596 			 * The i8254x apparently doesn't respond when the
    1597 			 * I/O BAR is 0, which looks as if it hasn't been
    1598 			 * configured.
   1599 			 */
   1600 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1601 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1602 				aprint_error_dev(sc->sc_dev,
   1603 				    "WARNING: I/O BAR at zero.\n");
   1604 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1605 					0, &sc->sc_iot, &sc->sc_ioh,
   1606 					NULL, &sc->sc_ios) == 0) {
   1607 				sc->sc_flags |= WM_F_IOH_VALID;
   1608 			} else {
   1609 				aprint_error_dev(sc->sc_dev,
   1610 				    "WARNING: unable to map I/O space\n");
   1611 			}
   1612 		}
   1613 
   1614 	}
   1615 
   1616 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1617 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1618 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1619 	if (sc->sc_type < WM_T_82542_2_1)
   1620 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1621 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1622 
   1623 	/* power up chip */
   1624 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1625 	    NULL)) && error != EOPNOTSUPP) {
   1626 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1627 		return;
   1628 	}
   1629 
   1630 	/* XXX Currently, Tx, Rx queue are always one. */
   1631 	sc->sc_nrxqueues = 1;
   1632 	sc->sc_ntxqueues = 1;
   1633 	error = wm_alloc_txrx_queues(sc);
   1634 	if (error)
   1635 		return;
   1636 
   1637 #ifndef WM_MSI_MSIX
   1638 	/*
   1639 	 * Map and establish our interrupt.
   1640 	 */
   1641 	if (pci_intr_map(pa, &ih)) {
   1642 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
   1643 		return;
   1644 	}
   1645 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
   1646 #ifdef WM_MPSAFE
   1647 	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
   1648 #endif
   1649 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, ih, IPL_NET,
   1650 	    wm_intr_legacy, sc, device_xname(sc->sc_dev));
   1651 	if (sc->sc_ihs[0] == NULL) {
   1652 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
   1653 		if (intrstr != NULL)
   1654 			aprint_error(" at %s", intrstr);
   1655 		aprint_error("\n");
   1656 		return;
   1657 	}
   1658 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   1659 	sc->sc_nintrs = 1;
   1660 #else /* WM_MSI_MSIX */
   1661 	/* Allocation settings */
   1662 	max_type = PCI_INTR_TYPE_MSIX;
   1663 	counts[PCI_INTR_TYPE_MSIX] = WM_MAX_NINTR;
   1664 	counts[PCI_INTR_TYPE_MSI] = 1;
   1665 	counts[PCI_INTR_TYPE_INTX] = 1;
   1666 
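         	/*
         	 * Try interrupt types in order: MSI-X first, then MSI, then
         	 * INTx, releasing the previous allocation before each retry.
         	 */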
   1667 alloc_retry:
   1668 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1669 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1670 		return;
   1671 	}
   1672 
   1673 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1674 		error = wm_setup_msix(sc);
   1675 		if (error) {
   1676 			pci_intr_release(pc, sc->sc_intrs,
   1677 			    counts[PCI_INTR_TYPE_MSIX]);
   1678 
   1679 			/* Setup for MSI: Disable MSI-X */
   1680 			max_type = PCI_INTR_TYPE_MSI;
   1681 			counts[PCI_INTR_TYPE_MSI] = 1;
   1682 			counts[PCI_INTR_TYPE_INTX] = 1;
   1683 			goto alloc_retry;
   1684 		}
    1685 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1686 		error = wm_setup_legacy(sc);
   1687 		if (error) {
   1688 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1689 			    counts[PCI_INTR_TYPE_MSI]);
   1690 
   1691 			/* The next try is for INTx: Disable MSI */
   1692 			max_type = PCI_INTR_TYPE_INTX;
   1693 			counts[PCI_INTR_TYPE_INTX] = 1;
   1694 			goto alloc_retry;
   1695 		}
   1696 	} else {
   1697 		error = wm_setup_legacy(sc);
   1698 		if (error) {
   1699 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1700 			    counts[PCI_INTR_TYPE_INTX]);
   1701 			return;
   1702 		}
   1703 	}
   1704 #endif /* WM_MSI_MSIX */
   1705 
   1706 	/*
   1707 	 * Check the function ID (unit number of the chip).
   1708 	 */
   1709 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1710 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1711 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1712 	    || (sc->sc_type == WM_T_82580)
   1713 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1714 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1715 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1716 	else
   1717 		sc->sc_funcid = 0;
   1718 
   1719 	/*
   1720 	 * Determine a few things about the bus we're connected to.
   1721 	 */
   1722 	if (sc->sc_type < WM_T_82543) {
   1723 		/* We don't really know the bus characteristics here. */
   1724 		sc->sc_bus_speed = 33;
   1725 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1726 		/*
    1727 		 * CSA (Communication Streaming Architecture) is about as fast
    1728 		 * as a 32-bit 66MHz PCI bus.
   1729 		 */
   1730 		sc->sc_flags |= WM_F_CSA;
   1731 		sc->sc_bus_speed = 66;
   1732 		aprint_verbose_dev(sc->sc_dev,
   1733 		    "Communication Streaming Architecture\n");
   1734 		if (sc->sc_type == WM_T_82547) {
   1735 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1736 			callout_setfunc(&sc->sc_txfifo_ch,
   1737 					wm_82547_txfifo_stall, sc);
   1738 			aprint_verbose_dev(sc->sc_dev,
   1739 			    "using 82547 Tx FIFO stall work-around\n");
   1740 		}
   1741 	} else if (sc->sc_type >= WM_T_82571) {
   1742 		sc->sc_flags |= WM_F_PCIE;
   1743 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1744 		    && (sc->sc_type != WM_T_ICH10)
   1745 		    && (sc->sc_type != WM_T_PCH)
   1746 		    && (sc->sc_type != WM_T_PCH2)
   1747 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1748 			/* ICH* and PCH* have no PCIe capability registers */
   1749 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1750 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1751 				NULL) == 0)
   1752 				aprint_error_dev(sc->sc_dev,
   1753 				    "unable to find PCIe capability\n");
   1754 		}
   1755 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1756 	} else {
   1757 		reg = CSR_READ(sc, WMREG_STATUS);
   1758 		if (reg & STATUS_BUS64)
   1759 			sc->sc_flags |= WM_F_BUS64;
   1760 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1761 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1762 
   1763 			sc->sc_flags |= WM_F_PCIX;
   1764 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1765 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1766 				aprint_error_dev(sc->sc_dev,
   1767 				    "unable to find PCIX capability\n");
   1768 			else if (sc->sc_type != WM_T_82545_3 &&
   1769 				 sc->sc_type != WM_T_82546_3) {
   1770 				/*
   1771 				 * Work around a problem caused by the BIOS
   1772 				 * setting the max memory read byte count
   1773 				 * incorrectly.
   1774 				 */
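         				/* Both fields encode 512 << n bytes (512..4096). */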
   1775 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1776 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1777 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1778 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1779 
   1780 				bytecnt =
   1781 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1782 				    PCIX_CMD_BYTECNT_SHIFT;
   1783 				maxb =
   1784 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1785 				    PCIX_STATUS_MAXB_SHIFT;
   1786 				if (bytecnt > maxb) {
   1787 					aprint_verbose_dev(sc->sc_dev,
   1788 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1789 					    512 << bytecnt, 512 << maxb);
   1790 					pcix_cmd = (pcix_cmd &
   1791 					    ~PCIX_CMD_BYTECNT_MASK) |
   1792 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1793 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1794 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1795 					    pcix_cmd);
   1796 				}
   1797 			}
   1798 		}
   1799 		/*
   1800 		 * The quad port adapter is special; it has a PCIX-PCIX
   1801 		 * bridge on the board, and can run the secondary bus at
   1802 		 * a higher speed.
   1803 		 */
   1804 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1805 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1806 								      : 66;
   1807 		} else if (sc->sc_flags & WM_F_PCIX) {
   1808 			switch (reg & STATUS_PCIXSPD_MASK) {
   1809 			case STATUS_PCIXSPD_50_66:
   1810 				sc->sc_bus_speed = 66;
   1811 				break;
   1812 			case STATUS_PCIXSPD_66_100:
   1813 				sc->sc_bus_speed = 100;
   1814 				break;
   1815 			case STATUS_PCIXSPD_100_133:
   1816 				sc->sc_bus_speed = 133;
   1817 				break;
   1818 			default:
   1819 				aprint_error_dev(sc->sc_dev,
   1820 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1821 				    reg & STATUS_PCIXSPD_MASK);
   1822 				sc->sc_bus_speed = 66;
   1823 				break;
   1824 			}
   1825 		} else
   1826 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1827 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1828 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1829 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1830 	}
   1831 
   1832 	/* clear interesting stat counters */
   1833 	CSR_READ(sc, WMREG_COLC);
   1834 	CSR_READ(sc, WMREG_RXERRC);
   1835 
   1836 	/* get PHY control from SMBus to PCIe */
   1837 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1838 	    || (sc->sc_type == WM_T_PCH_LPT))
   1839 		wm_smbustopci(sc);
   1840 
   1841 	/* Reset the chip to a known state. */
   1842 	wm_reset(sc);
   1843 
   1844 	/* Get some information about the EEPROM. */
   1845 	switch (sc->sc_type) {
   1846 	case WM_T_82542_2_0:
   1847 	case WM_T_82542_2_1:
   1848 	case WM_T_82543:
   1849 	case WM_T_82544:
   1850 		/* Microwire */
   1851 		sc->sc_nvm_wordsize = 64;
   1852 		sc->sc_nvm_addrbits = 6;
   1853 		break;
   1854 	case WM_T_82540:
   1855 	case WM_T_82545:
   1856 	case WM_T_82545_3:
   1857 	case WM_T_82546:
   1858 	case WM_T_82546_3:
   1859 		/* Microwire */
   1860 		reg = CSR_READ(sc, WMREG_EECD);
   1861 		if (reg & EECD_EE_SIZE) {
   1862 			sc->sc_nvm_wordsize = 256;
   1863 			sc->sc_nvm_addrbits = 8;
   1864 		} else {
   1865 			sc->sc_nvm_wordsize = 64;
   1866 			sc->sc_nvm_addrbits = 6;
   1867 		}
   1868 		sc->sc_flags |= WM_F_LOCK_EECD;
   1869 		break;
   1870 	case WM_T_82541:
   1871 	case WM_T_82541_2:
   1872 	case WM_T_82547:
   1873 	case WM_T_82547_2:
   1874 		sc->sc_flags |= WM_F_LOCK_EECD;
   1875 		reg = CSR_READ(sc, WMREG_EECD);
   1876 		if (reg & EECD_EE_TYPE) {
   1877 			/* SPI */
   1878 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1879 			wm_nvm_set_addrbits_size_eecd(sc);
   1880 		} else {
   1881 			/* Microwire */
   1882 			if ((reg & EECD_EE_ABITS) != 0) {
   1883 				sc->sc_nvm_wordsize = 256;
   1884 				sc->sc_nvm_addrbits = 8;
   1885 			} else {
   1886 				sc->sc_nvm_wordsize = 64;
   1887 				sc->sc_nvm_addrbits = 6;
   1888 			}
   1889 		}
   1890 		break;
   1891 	case WM_T_82571:
   1892 	case WM_T_82572:
   1893 		/* SPI */
   1894 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1895 		wm_nvm_set_addrbits_size_eecd(sc);
   1896 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1897 		break;
   1898 	case WM_T_82573:
   1899 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1900 		/* FALLTHROUGH */
   1901 	case WM_T_82574:
   1902 	case WM_T_82583:
   1903 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1904 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1905 			sc->sc_nvm_wordsize = 2048;
   1906 		} else {
   1907 			/* SPI */
   1908 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1909 			wm_nvm_set_addrbits_size_eecd(sc);
   1910 		}
   1911 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1912 		break;
   1913 	case WM_T_82575:
   1914 	case WM_T_82576:
   1915 	case WM_T_82580:
   1916 	case WM_T_I350:
   1917 	case WM_T_I354:
   1918 	case WM_T_80003:
   1919 		/* SPI */
   1920 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1921 		wm_nvm_set_addrbits_size_eecd(sc);
   1922 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1923 		    | WM_F_LOCK_SWSM;
   1924 		break;
   1925 	case WM_T_ICH8:
   1926 	case WM_T_ICH9:
   1927 	case WM_T_ICH10:
   1928 	case WM_T_PCH:
   1929 	case WM_T_PCH2:
   1930 	case WM_T_PCH_LPT:
   1931 		/* FLASH */
   1932 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1933 		sc->sc_nvm_wordsize = 2048;
   1934 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1935 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1936 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1937 			aprint_error_dev(sc->sc_dev,
   1938 			    "can't map FLASH registers\n");
   1939 			goto out;
   1940 		}
   1941 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1942 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1943 						ICH_FLASH_SECTOR_SIZE;
   1944 		sc->sc_ich8_flash_bank_size =
   1945 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1946 		sc->sc_ich8_flash_bank_size -=
   1947 		    (reg & ICH_GFPREG_BASE_MASK);
   1948 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1949 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1950 		break;
   1951 	case WM_T_I210:
   1952 	case WM_T_I211:
   1953 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1954 			wm_nvm_set_addrbits_size_eecd(sc);
   1955 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1956 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1957 		} else {
   1958 			sc->sc_nvm_wordsize = INVM_SIZE;
   1959 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1960 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1961 		}
   1962 		break;
   1963 	default:
   1964 		break;
   1965 	}
   1966 
   1967 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1968 	switch (sc->sc_type) {
   1969 	case WM_T_82571:
   1970 	case WM_T_82572:
   1971 		reg = CSR_READ(sc, WMREG_SWSM2);
   1972 		if ((reg & SWSM2_LOCK) == 0) {
   1973 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1974 			force_clear_smbi = true;
   1975 		} else
   1976 			force_clear_smbi = false;
   1977 		break;
   1978 	case WM_T_82573:
   1979 	case WM_T_82574:
   1980 	case WM_T_82583:
   1981 		force_clear_smbi = true;
   1982 		break;
   1983 	default:
   1984 		force_clear_smbi = false;
   1985 		break;
   1986 	}
   1987 	if (force_clear_smbi) {
   1988 		reg = CSR_READ(sc, WMREG_SWSM);
   1989 		if ((reg & SWSM_SMBI) != 0)
   1990 			aprint_error_dev(sc->sc_dev,
   1991 			    "Please update the Bootagent\n");
   1992 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1993 	}
   1994 
   1995 	/*
    1996 	 * Defer printing the EEPROM type until after verifying the checksum.
   1997 	 * This allows the EEPROM type to be printed correctly in the case
   1998 	 * that no EEPROM is attached.
   1999 	 */
   2000 	/*
   2001 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2002 	 * this for later, so we can fail future reads from the EEPROM.
   2003 	 */
   2004 	if (wm_nvm_validate_checksum(sc)) {
   2005 		/*
   2006 		 * Read twice again because some PCI-e parts fail the
   2007 		 * first check due to the link being in sleep state.
   2008 		 */
   2009 		if (wm_nvm_validate_checksum(sc))
   2010 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2011 	}
   2012 
   2013 	/* Set device properties (macflags) */
   2014 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2015 
   2016 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2017 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2018 	else {
   2019 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2020 		    sc->sc_nvm_wordsize);
   2021 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2022 			aprint_verbose("iNVM");
   2023 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2024 			aprint_verbose("FLASH(HW)");
   2025 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2026 			aprint_verbose("FLASH");
   2027 		else {
   2028 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2029 				eetype = "SPI";
   2030 			else
   2031 				eetype = "MicroWire";
   2032 			aprint_verbose("(%d address bits) %s EEPROM",
   2033 			    sc->sc_nvm_addrbits, eetype);
   2034 		}
   2035 	}
   2036 	wm_nvm_version(sc);
   2037 	aprint_verbose("\n");
   2038 
   2039 	/* Check for I21[01] PLL workaround */
   2040 	if (sc->sc_type == WM_T_I210)
   2041 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2042 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2043 		/* NVM image release 3.25 has a workaround */
   2044 		if ((sc->sc_nvm_ver_major < 3)
   2045 		    || ((sc->sc_nvm_ver_major == 3)
   2046 			&& (sc->sc_nvm_ver_minor < 25))) {
   2047 			aprint_verbose_dev(sc->sc_dev,
   2048 			    "ROM image version %d.%d is older than 3.25\n",
   2049 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2050 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2051 		}
   2052 	}
   2053 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2054 		wm_pll_workaround_i210(sc);
   2055 
   2056 	switch (sc->sc_type) {
   2057 	case WM_T_82571:
   2058 	case WM_T_82572:
   2059 	case WM_T_82573:
   2060 	case WM_T_82574:
   2061 	case WM_T_82583:
   2062 	case WM_T_80003:
   2063 	case WM_T_ICH8:
   2064 	case WM_T_ICH9:
   2065 	case WM_T_ICH10:
   2066 	case WM_T_PCH:
   2067 	case WM_T_PCH2:
   2068 	case WM_T_PCH_LPT:
   2069 		if (wm_check_mng_mode(sc) != 0)
   2070 			wm_get_hw_control(sc);
   2071 		break;
   2072 	default:
   2073 		break;
   2074 	}
   2075 	wm_get_wakeup(sc);
   2076 	/*
    2077 	 * Read the Ethernet address from the EEPROM, unless it was
    2078 	 * already found in the device properties.
   2079 	 */
   2080 	ea = prop_dictionary_get(dict, "mac-address");
   2081 	if (ea != NULL) {
   2082 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2083 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2084 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2085 	} else {
   2086 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2087 			aprint_error_dev(sc->sc_dev,
   2088 			    "unable to read Ethernet address\n");
   2089 			goto out;
   2090 		}
   2091 	}
   2092 
   2093 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2094 	    ether_sprintf(enaddr));
   2095 
   2096 	/*
   2097 	 * Read the config info from the EEPROM, and set up various
   2098 	 * bits in the control registers based on their contents.
   2099 	 */
   2100 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2101 	if (pn != NULL) {
   2102 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2103 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2104 	} else {
   2105 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2106 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2107 			goto out;
   2108 		}
   2109 	}
   2110 
   2111 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2112 	if (pn != NULL) {
   2113 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2114 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2115 	} else {
   2116 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2117 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2118 			goto out;
   2119 		}
   2120 	}
   2121 
   2122 	/* check for WM_F_WOL */
   2123 	switch (sc->sc_type) {
   2124 	case WM_T_82542_2_0:
   2125 	case WM_T_82542_2_1:
   2126 	case WM_T_82543:
   2127 		/* dummy? */
   2128 		eeprom_data = 0;
   2129 		apme_mask = NVM_CFG3_APME;
   2130 		break;
   2131 	case WM_T_82544:
   2132 		apme_mask = NVM_CFG2_82544_APM_EN;
   2133 		eeprom_data = cfg2;
   2134 		break;
   2135 	case WM_T_82546:
   2136 	case WM_T_82546_3:
   2137 	case WM_T_82571:
   2138 	case WM_T_82572:
   2139 	case WM_T_82573:
   2140 	case WM_T_82574:
   2141 	case WM_T_82583:
   2142 	case WM_T_80003:
   2143 	default:
   2144 		apme_mask = NVM_CFG3_APME;
   2145 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2146 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2147 		break;
   2148 	case WM_T_82575:
   2149 	case WM_T_82576:
   2150 	case WM_T_82580:
   2151 	case WM_T_I350:
   2152 	case WM_T_I354: /* XXX ok? */
   2153 	case WM_T_ICH8:
   2154 	case WM_T_ICH9:
   2155 	case WM_T_ICH10:
   2156 	case WM_T_PCH:
   2157 	case WM_T_PCH2:
   2158 	case WM_T_PCH_LPT:
   2159 		/* XXX The funcid should be checked on some devices */
   2160 		apme_mask = WUC_APME;
   2161 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2162 		break;
   2163 	}
   2164 
   2165 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2166 	if ((eeprom_data & apme_mask) != 0)
   2167 		sc->sc_flags |= WM_F_WOL;
   2168 #ifdef WM_DEBUG
   2169 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2170 		printf("WOL\n");
   2171 #endif
   2172 
   2173 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2174 		/* Check NVM for autonegotiation */
   2175 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2176 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2177 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2178 		}
   2179 	}
   2180 
   2181 	/*
    2182 	 * XXX need special handling for some multi-port cards
    2183 	 * to disable a particular port.
   2184 	 */
   2185 
   2186 	if (sc->sc_type >= WM_T_82544) {
   2187 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2188 		if (pn != NULL) {
   2189 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2190 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2191 		} else {
   2192 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2193 				aprint_error_dev(sc->sc_dev,
   2194 				    "unable to read SWDPIN\n");
   2195 				goto out;
   2196 			}
   2197 		}
   2198 	}
   2199 
   2200 	if (cfg1 & NVM_CFG1_ILOS)
   2201 		sc->sc_ctrl |= CTRL_ILOS;
   2202 
   2203 	/*
   2204 	 * XXX
    2205 	 * This code isn't correct, because pins 2 and 3 are located at
    2206 	 * different positions on newer chips.  Check all the datasheets.
    2207 	 *
    2208 	 * Until resolved, only handle chips up to and including the 82580.
   2209 	 */
   2210 	if (sc->sc_type <= WM_T_82580) {
   2211 		if (sc->sc_type >= WM_T_82544) {
   2212 			sc->sc_ctrl |=
   2213 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2214 			    CTRL_SWDPIO_SHIFT;
   2215 			sc->sc_ctrl |=
   2216 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2217 			    CTRL_SWDPINS_SHIFT;
   2218 		} else {
   2219 			sc->sc_ctrl |=
   2220 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2221 			    CTRL_SWDPIO_SHIFT;
   2222 		}
   2223 	}
   2224 
   2225 	/* XXX For other than 82580? */
   2226 	if (sc->sc_type == WM_T_82580) {
   2227 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2228 		printf("CFG3 = %08x\n", (uint32_t)nvmword);
   2229 		if (nvmword & __BIT(13)) {
   2230 			printf("SET ILOS\n");
   2231 			sc->sc_ctrl |= CTRL_ILOS;
   2232 		}
   2233 	}
   2234 
   2235 #if 0
   2236 	if (sc->sc_type >= WM_T_82544) {
   2237 		if (cfg1 & NVM_CFG1_IPS0)
   2238 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2239 		if (cfg1 & NVM_CFG1_IPS1)
   2240 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2241 		sc->sc_ctrl_ext |=
   2242 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2243 		    CTRL_EXT_SWDPIO_SHIFT;
   2244 		sc->sc_ctrl_ext |=
   2245 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2246 		    CTRL_EXT_SWDPINS_SHIFT;
   2247 	} else {
   2248 		sc->sc_ctrl_ext |=
   2249 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2250 		    CTRL_EXT_SWDPIO_SHIFT;
   2251 	}
   2252 #endif
   2253 
   2254 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2255 #if 0
   2256 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2257 #endif
   2258 
   2259 	if (sc->sc_type == WM_T_PCH) {
   2260 		uint16_t val;
   2261 
   2262 		/* Save the NVM K1 bit setting */
   2263 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2264 
   2265 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2266 			sc->sc_nvm_k1_enabled = 1;
   2267 		else
   2268 			sc->sc_nvm_k1_enabled = 0;
   2269 	}
   2270 
   2271 	/*
   2272 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
   2273 	 * media structures accordingly.
   2274 	 */
   2275 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2276 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2277 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2278 	    || sc->sc_type == WM_T_82573
   2279 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2280 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2281 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2282 	} else if (sc->sc_type < WM_T_82543 ||
   2283 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2284 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2285 			aprint_error_dev(sc->sc_dev,
   2286 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2287 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2288 		}
   2289 		wm_tbi_mediainit(sc);
   2290 	} else {
   2291 		switch (sc->sc_type) {
   2292 		case WM_T_82575:
   2293 		case WM_T_82576:
   2294 		case WM_T_82580:
   2295 		case WM_T_I350:
   2296 		case WM_T_I354:
   2297 		case WM_T_I210:
   2298 		case WM_T_I211:
   2299 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2300 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2301 			switch (link_mode) {
   2302 			case CTRL_EXT_LINK_MODE_1000KX:
   2303 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2304 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2305 				break;
   2306 			case CTRL_EXT_LINK_MODE_SGMII:
   2307 				if (wm_sgmii_uses_mdio(sc)) {
   2308 					aprint_verbose_dev(sc->sc_dev,
   2309 					    "SGMII(MDIO)\n");
   2310 					sc->sc_flags |= WM_F_SGMII;
   2311 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2312 					break;
   2313 				}
   2314 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2315 				/*FALLTHROUGH*/
   2316 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2317 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2318 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2319 					if (link_mode
   2320 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2321 						sc->sc_mediatype
   2322 						    = WM_MEDIATYPE_COPPER;
   2323 						sc->sc_flags |= WM_F_SGMII;
   2324 					} else {
   2325 						sc->sc_mediatype
   2326 						    = WM_MEDIATYPE_SERDES;
   2327 						aprint_verbose_dev(sc->sc_dev,
   2328 						    "SERDES\n");
   2329 					}
   2330 					break;
   2331 				}
   2332 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2333 					aprint_verbose_dev(sc->sc_dev,
   2334 					    "SERDES\n");
   2335 
   2336 				/* Change current link mode setting */
   2337 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2338 				switch (sc->sc_mediatype) {
   2339 				case WM_MEDIATYPE_COPPER:
   2340 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2341 					break;
   2342 				case WM_MEDIATYPE_SERDES:
   2343 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2344 					break;
   2345 				default:
   2346 					break;
   2347 				}
   2348 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2349 				break;
   2350 			case CTRL_EXT_LINK_MODE_GMII:
   2351 			default:
   2352 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2353 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2354 				break;
   2355 			}
   2356 
   2358 			if ((sc->sc_flags & WM_F_SGMII) != 0)
   2359 				reg |= CTRL_EXT_I2C_ENA;
   2360 			else
   2361 				reg &= ~CTRL_EXT_I2C_ENA;
   2362 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2363 
   2364 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2365 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2366 			else
   2367 				wm_tbi_mediainit(sc);
   2368 			break;
   2369 		default:
   2370 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2371 				aprint_error_dev(sc->sc_dev,
   2372 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2373 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2374 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2375 		}
   2376 	}
   2377 
   2378 	ifp = &sc->sc_ethercom.ec_if;
   2379 	xname = device_xname(sc->sc_dev);
   2380 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2381 	ifp->if_softc = sc;
   2382 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2383 	ifp->if_ioctl = wm_ioctl;
   2384 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2385 		ifp->if_start = wm_nq_start;
   2386 	else
   2387 		ifp->if_start = wm_start;
   2388 	ifp->if_watchdog = wm_watchdog;
   2389 	ifp->if_init = wm_init;
   2390 	ifp->if_stop = wm_stop;
   2391 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2392 	IFQ_SET_READY(&ifp->if_snd);
   2393 
   2394 	/* Check for jumbo frame */
   2395 	switch (sc->sc_type) {
   2396 	case WM_T_82573:
   2397 		/* XXX limited to 9234 if ASPM is disabled */
   2398 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2399 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2400 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2401 		break;
   2402 	case WM_T_82571:
   2403 	case WM_T_82572:
   2404 	case WM_T_82574:
   2405 	case WM_T_82575:
   2406 	case WM_T_82576:
   2407 	case WM_T_82580:
   2408 	case WM_T_I350:
   2409 	case WM_T_I354: /* XXXX ok? */
   2410 	case WM_T_I210:
   2411 	case WM_T_I211:
   2412 	case WM_T_80003:
   2413 	case WM_T_ICH9:
   2414 	case WM_T_ICH10:
   2415 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2416 	case WM_T_PCH_LPT:
   2417 		/* XXX limited to 9234 */
   2418 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2419 		break;
   2420 	case WM_T_PCH:
   2421 		/* XXX limited to 4096 */
   2422 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2423 		break;
   2424 	case WM_T_82542_2_0:
   2425 	case WM_T_82542_2_1:
   2426 	case WM_T_82583:
   2427 	case WM_T_ICH8:
   2428 		/* No support for jumbo frame */
   2429 		break;
   2430 	default:
   2431 		/* ETHER_MAX_LEN_JUMBO */
   2432 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2433 		break;
   2434 	}
   2435 
   2436 	/* If we're a i82543 or greater, we can support VLANs. */
   2437 	if (sc->sc_type >= WM_T_82543)
   2438 		sc->sc_ethercom.ec_capabilities |=
   2439 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2440 
   2441 	/*
    2442 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2443 	 * on i82543 and later.
   2444 	 */
   2445 	if (sc->sc_type >= WM_T_82543) {
   2446 		ifp->if_capabilities |=
   2447 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2448 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2449 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2450 		    IFCAP_CSUM_TCPv6_Tx |
   2451 		    IFCAP_CSUM_UDPv6_Tx;
   2452 	}
   2453 
   2454 	/*
   2455 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2456 	 *
   2457 	 *	82541GI (8086:1076) ... no
   2458 	 *	82572EI (8086:10b9) ... yes
   2459 	 */
   2460 	if (sc->sc_type >= WM_T_82571) {
   2461 		ifp->if_capabilities |=
   2462 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2463 	}
   2464 
   2465 	/*
   2466 	 * If we're a i82544 or greater (except i82547), we can do
   2467 	 * TCP segmentation offload.
   2468 	 */
   2469 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2470 		ifp->if_capabilities |= IFCAP_TSOv4;
   2471 	}
   2472 
   2473 	if (sc->sc_type >= WM_T_82571) {
   2474 		ifp->if_capabilities |= IFCAP_TSOv6;
   2475 	}
   2476 
   2477 #ifdef WM_MPSAFE
   2478 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2479 #else
   2480 	sc->sc_core_lock = NULL;
   2481 #endif
   2482 
   2483 	/* Attach the interface. */
   2484 	if_attach(ifp);
   2485 	ether_ifattach(ifp, enaddr);
   2486 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2487 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2488 			  RND_FLAG_DEFAULT);
   2489 
   2490 #ifdef WM_EVENT_COUNTERS
   2491 	/* Attach event counters. */
   2492 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2493 	    NULL, xname, "txsstall");
   2494 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2495 	    NULL, xname, "txdstall");
   2496 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2497 	    NULL, xname, "txfifo_stall");
   2498 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2499 	    NULL, xname, "txdw");
   2500 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2501 	    NULL, xname, "txqe");
   2502 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2503 	    NULL, xname, "rxintr");
   2504 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2505 	    NULL, xname, "linkintr");
   2506 
   2507 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2508 	    NULL, xname, "rxipsum");
   2509 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2510 	    NULL, xname, "rxtusum");
   2511 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2512 	    NULL, xname, "txipsum");
   2513 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2514 	    NULL, xname, "txtusum");
   2515 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2516 	    NULL, xname, "txtusum6");
   2517 
   2518 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2519 	    NULL, xname, "txtso");
   2520 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2521 	    NULL, xname, "txtso6");
   2522 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2523 	    NULL, xname, "txtsopain");
   2524 
   2525 	for (i = 0; i < WM_NTXSEGS; i++) {
   2526 		snprintf(wm_txseg_evcnt_names[i],
   2527 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2528 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2529 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2530 	}
   2531 
   2532 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2533 	    NULL, xname, "txdrop");
   2534 
   2535 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2536 	    NULL, xname, "tu");
   2537 
   2538 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2539 	    NULL, xname, "tx_xoff");
   2540 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2541 	    NULL, xname, "tx_xon");
   2542 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2543 	    NULL, xname, "rx_xoff");
   2544 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2545 	    NULL, xname, "rx_xon");
   2546 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2547 	    NULL, xname, "rx_macctl");
   2548 #endif /* WM_EVENT_COUNTERS */
   2549 
   2550 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2551 		pmf_class_network_register(self, ifp);
   2552 	else
   2553 		aprint_error_dev(self, "couldn't establish power handler\n");
   2554 
   2555 	sc->sc_flags |= WM_F_ATTACHED;
   2556  out:
   2557 	return;
   2558 }
   2559 
   2560 /* The detach function (ca_detach) */
   2561 static int
   2562 wm_detach(device_t self, int flags __unused)
   2563 {
   2564 	struct wm_softc *sc = device_private(self);
   2565 	struct wm_rxqueue *rxq = sc->sc_rxq;
   2566 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2567 	int i;
   2568 #ifndef WM_MPSAFE
   2569 	int s;
   2570 #endif
   2571 
   2572 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2573 		return 0;
   2574 
   2575 #ifndef WM_MPSAFE
   2576 	s = splnet();
   2577 #endif
   2578 	/* Stop the interface. Callouts are stopped in it. */
   2579 	wm_stop(ifp, 1);
   2580 
   2581 #ifndef WM_MPSAFE
   2582 	splx(s);
   2583 #endif
   2584 
   2585 	pmf_device_deregister(self);
   2586 
   2587 	/* Tell the firmware about the release */
   2588 	WM_CORE_LOCK(sc);
   2589 	wm_release_manageability(sc);
   2590 	wm_release_hw_control(sc);
   2591 	WM_CORE_UNLOCK(sc);
   2592 
   2593 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2594 
   2595 	/* Delete all remaining media. */
   2596 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2597 
   2598 	ether_ifdetach(ifp);
   2599 	if_detach(ifp);
    2600 
   2602 	/* Unload RX dmamaps and free mbufs */
   2603 	WM_RX_LOCK(rxq);
   2604 	wm_rxdrain(rxq);
   2605 	WM_RX_UNLOCK(rxq);
   2606 	/* Must unlock here */
   2607 
   2608 	wm_free_txrx_queues(sc);
   2609 
   2610 	/* Disestablish the interrupt handler */
   2611 	for (i = 0; i < sc->sc_nintrs; i++) {
   2612 		if (sc->sc_ihs[i] != NULL) {
   2613 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2614 			sc->sc_ihs[i] = NULL;
   2615 		}
   2616 	}
   2617 #ifdef WM_MSI_MSIX
   2618 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2619 #endif /* WM_MSI_MSIX */
   2620 
   2621 	/* Unmap the registers */
   2622 	if (sc->sc_ss) {
   2623 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2624 		sc->sc_ss = 0;
   2625 	}
   2626 	if (sc->sc_ios) {
   2627 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2628 		sc->sc_ios = 0;
   2629 	}
   2630 	if (sc->sc_flashs) {
   2631 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2632 		sc->sc_flashs = 0;
   2633 	}
   2634 
   2635 	if (sc->sc_core_lock)
   2636 		mutex_obj_free(sc->sc_core_lock);
   2637 
   2638 	return 0;
   2639 }
   2640 
   2641 static bool
   2642 wm_suspend(device_t self, const pmf_qual_t *qual)
   2643 {
   2644 	struct wm_softc *sc = device_private(self);
   2645 
   2646 	wm_release_manageability(sc);
   2647 	wm_release_hw_control(sc);
   2648 #ifdef WM_WOL
   2649 	wm_enable_wakeup(sc);
   2650 #endif
   2651 
   2652 	return true;
   2653 }
   2654 
   2655 static bool
   2656 wm_resume(device_t self, const pmf_qual_t *qual)
   2657 {
   2658 	struct wm_softc *sc = device_private(self);
   2659 
   2660 	wm_init_manageability(sc);
   2661 
   2662 	return true;
   2663 }
   2664 
   2665 /*
   2666  * wm_watchdog:		[ifnet interface function]
   2667  *
   2668  *	Watchdog timer handler.
   2669  */
   2670 static void
   2671 wm_watchdog(struct ifnet *ifp)
   2672 {
   2673 	struct wm_softc *sc = ifp->if_softc;
   2674 	struct wm_txqueue *txq = sc->sc_txq;
   2675 
   2676 	/*
   2677 	 * Since we're using delayed interrupts, sweep up
   2678 	 * before we report an error.
   2679 	 */
   2680 	WM_TX_LOCK(txq);
   2681 	wm_txeof(sc);
   2682 	WM_TX_UNLOCK(txq);
   2683 
   2684 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2685 #ifdef WM_DEBUG
   2686 		int i, j;
   2687 		struct wm_txsoft *txs;
   2688 #endif
   2689 		log(LOG_ERR,
   2690 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2691 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2692 		    txq->txq_next);
   2693 		ifp->if_oerrors++;
   2694 #ifdef WM_DEBUG
   2695 		for (i = txq->txq_txsdirty; i != txq->txq_txsnext ;
   2696 		    i = WM_NEXTTXS(txq, i)) {
   2697 		    txs = &txq->txq_txsoft[i];
   2698 		    printf("txs %d tx %d -> %d\n",
   2699 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2700 		    for (j = txs->txs_firstdesc; ;
   2701 			j = WM_NEXTTX(txq, j)) {
   2702 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2703 			    txq->txq_nq_txdescs[j].nqtx_data.nqtxd_addr);
   2704 			printf("\t %#08x%08x\n",
   2705 			    txq->txq_nq_txdescs[j].nqtx_data.nqtxd_fields,
   2706 			    txq->txq_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
   2707 			if (j == txs->txs_lastdesc)
   2708 				break;
   2709 			}
   2710 		}
   2711 #endif
   2712 		/* Reset the interface. */
   2713 		(void) wm_init(ifp);
   2714 	}
   2715 
   2716 	/* Try to get more packets going. */
   2717 	ifp->if_start(ifp);
   2718 }
   2719 
   2720 /*
   2721  * wm_tick:
   2722  *
   2723  *	One second timer, used to check link status, sweep up
   2724  *	completed transmit jobs, etc.
   2725  */
   2726 static void
   2727 wm_tick(void *arg)
   2728 {
   2729 	struct wm_softc *sc = arg;
   2730 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2731 #ifndef WM_MPSAFE
   2732 	int s;
   2733 
   2734 	s = splnet();
   2735 #endif
   2736 
   2737 	WM_CORE_LOCK(sc);
   2738 
   2739 	if (sc->sc_stopping)
   2740 		goto out;
   2741 
   2742 	if (sc->sc_type >= WM_T_82542_2_1) {
   2743 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2744 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2745 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2746 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2747 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2748 	}
   2749 
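         	/*
         	 * These statistics registers are clear-on-read, so each
         	 * read yields the count accumulated since the last tick.
         	 */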
   2750 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2751 	ifp->if_ierrors += 0ULL /* ensure quad_t */
    2752 	    + CSR_READ(sc, WMREG_CRCERRS)
   2753 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2754 	    + CSR_READ(sc, WMREG_SYMERRC)
   2755 	    + CSR_READ(sc, WMREG_RXERRC)
   2756 	    + CSR_READ(sc, WMREG_SEC)
   2757 	    + CSR_READ(sc, WMREG_CEXTERR)
   2758 	    + CSR_READ(sc, WMREG_RLEC);
   2759 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2760 
   2761 	if (sc->sc_flags & WM_F_HAS_MII)
   2762 		mii_tick(&sc->sc_mii);
   2763 	else if ((sc->sc_type >= WM_T_82575)
   2764 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2765 		wm_serdes_tick(sc);
   2766 	else
   2767 		wm_tbi_tick(sc);
   2768 
   2769 out:
   2770 	WM_CORE_UNLOCK(sc);
   2771 #ifndef WM_MPSAFE
   2772 	splx(s);
   2773 #endif
   2774 
   2775 	if (!sc->sc_stopping)
   2776 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2777 }
   2778 
   2779 static int
   2780 wm_ifflags_cb(struct ethercom *ec)
   2781 {
   2782 	struct ifnet *ifp = &ec->ec_if;
   2783 	struct wm_softc *sc = ifp->if_softc;
   2784 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2785 	int rc = 0;
   2786 
   2787 	WM_CORE_LOCK(sc);
   2788 
   2789 	if (change != 0)
   2790 		sc->sc_if_flags = ifp->if_flags;
   2791 
   2792 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2793 		rc = ENETRESET;
   2794 		goto out;
   2795 	}
   2796 
   2797 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2798 		wm_set_filter(sc);
   2799 
   2800 	wm_set_vlan(sc);
   2801 
   2802 out:
   2803 	WM_CORE_UNLOCK(sc);
   2804 
   2805 	return rc;
   2806 }
   2807 
   2808 /*
   2809  * wm_ioctl:		[ifnet interface function]
   2810  *
   2811  *	Handle control requests from the operator.
   2812  */
   2813 static int
   2814 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2815 {
   2816 	struct wm_softc *sc = ifp->if_softc;
   2817 	struct ifreq *ifr = (struct ifreq *) data;
   2818 	struct ifaddr *ifa = (struct ifaddr *)data;
   2819 	struct sockaddr_dl *sdl;
   2820 	int s, error;
   2821 
   2822 #ifndef WM_MPSAFE
   2823 	s = splnet();
   2824 #endif
   2825 	switch (cmd) {
   2826 	case SIOCSIFMEDIA:
   2827 	case SIOCGIFMEDIA:
   2828 		WM_CORE_LOCK(sc);
   2829 		/* Flow control requires full-duplex mode. */
   2830 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2831 		    (ifr->ifr_media & IFM_FDX) == 0)
   2832 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2833 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2834 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2835 				/* We can do both TXPAUSE and RXPAUSE. */
   2836 				ifr->ifr_media |=
   2837 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2838 			}
   2839 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2840 		}
   2841 		WM_CORE_UNLOCK(sc);
   2842 #ifdef WM_MPSAFE
   2843 		s = splnet();
   2844 #endif
   2845 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2846 #ifdef WM_MPSAFE
   2847 		splx(s);
   2848 #endif
   2849 		break;
   2850 	case SIOCINITIFADDR:
   2851 		WM_CORE_LOCK(sc);
   2852 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2853 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2854 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2855 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2856 			/* unicast address is first multicast entry */
   2857 			wm_set_filter(sc);
   2858 			error = 0;
   2859 			WM_CORE_UNLOCK(sc);
   2860 			break;
   2861 		}
   2862 		WM_CORE_UNLOCK(sc);
   2863 		/*FALLTHROUGH*/
   2864 	default:
   2865 #ifdef WM_MPSAFE
   2866 		s = splnet();
   2867 #endif
   2868 		/* It may call wm_start, so unlock here */
   2869 		error = ether_ioctl(ifp, cmd, data);
   2870 #ifdef WM_MPSAFE
   2871 		splx(s);
   2872 #endif
   2873 		if (error != ENETRESET)
   2874 			break;
   2875 
   2876 		error = 0;
   2877 
   2878 		if (cmd == SIOCSIFCAP) {
   2879 			error = (*ifp->if_init)(ifp);
   2880 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2881 			;
   2882 		else if (ifp->if_flags & IFF_RUNNING) {
   2883 			/*
   2884 			 * Multicast list has changed; set the hardware filter
   2885 			 * accordingly.
   2886 			 */
   2887 			WM_CORE_LOCK(sc);
   2888 			wm_set_filter(sc);
   2889 			WM_CORE_UNLOCK(sc);
   2890 		}
   2891 		break;
   2892 	}
   2893 
   2894 #ifndef WM_MPSAFE
   2895 	splx(s);
   2896 #endif
   2897 	return error;
   2898 }
   2899 
   2900 /* MAC address related */
   2901 
   2902 /*
    2903  * Get the offset of the MAC address and return it.
    2904  * If an error occurs, use offset 0.
   2905  */
   2906 static uint16_t
   2907 wm_check_alt_mac_addr(struct wm_softc *sc)
   2908 {
   2909 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2910 	uint16_t offset = NVM_OFF_MACADDR;
   2911 
   2912 	/* Try to read alternative MAC address pointer */
   2913 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2914 		return 0;
   2915 
    2916 	/* Check whether the pointer is valid. */
   2917 	if ((offset == 0x0000) || (offset == 0xffff))
   2918 		return 0;
   2919 
   2920 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2921 	/*
	 * Check whether the alternative MAC address is valid.
	 * Some cards have a non-0xffff pointer but don't actually use
	 * an alternative MAC address.
	 *
	 * A valid unicast address must not have the multicast (I/G)
	 * bit set, so check for that.
   2927 	 */
   2928 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
		if ((myea[0] & 0x01) == 0)
   2930 			return offset; /* Found */
   2931 
   2932 	/* Not found */
   2933 	return 0;
   2934 }
   2935 
   2936 static int
   2937 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2938 {
   2939 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2940 	uint16_t offset = NVM_OFF_MACADDR;
   2941 	int do_invert = 0;
   2942 
   2943 	switch (sc->sc_type) {
   2944 	case WM_T_82580:
   2945 	case WM_T_I350:
   2946 	case WM_T_I354:
   2947 		/* EEPROM Top Level Partitioning */
   2948 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2949 		break;
   2950 	case WM_T_82571:
   2951 	case WM_T_82575:
   2952 	case WM_T_82576:
   2953 	case WM_T_80003:
   2954 	case WM_T_I210:
   2955 	case WM_T_I211:
   2956 		offset = wm_check_alt_mac_addr(sc);
   2957 		if (offset == 0)
   2958 			if ((sc->sc_funcid & 0x01) == 1)
   2959 				do_invert = 1;
   2960 		break;
   2961 	default:
   2962 		if ((sc->sc_funcid & 0x01) == 1)
   2963 			do_invert = 1;
   2964 		break;
   2965 	}
   2966 
   2967 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2968 		myea) != 0)
   2969 		goto bad;
   2970 
   2971 	enaddr[0] = myea[0] & 0xff;
   2972 	enaddr[1] = myea[0] >> 8;
   2973 	enaddr[2] = myea[1] & 0xff;
   2974 	enaddr[3] = myea[1] >> 8;
   2975 	enaddr[4] = myea[2] & 0xff;
   2976 	enaddr[5] = myea[2] >> 8;
   2977 
   2978 	/*
   2979 	 * Toggle the LSB of the MAC address on the second port
   2980 	 * of some dual port cards.
   2981 	 */
   2982 	if (do_invert != 0)
   2983 		enaddr[5] ^= 1;
   2984 
   2985 	return 0;
   2986 
   2987  bad:
   2988 	return -1;
   2989 }
   2990 
   2991 /*
   2992  * wm_set_ral:
   2993  *
 *	Set an entry in the receive address list.
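 *
 *	RAL_LO takes the first four bytes of the Ethernet address and
 *	RAL_HI the remaining two together with the Address Valid
 *	(RAL_AV) bit.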
   2995  */
   2996 static void
   2997 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2998 {
   2999 	uint32_t ral_lo, ral_hi;
   3000 
   3001 	if (enaddr != NULL) {
   3002 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3003 		    (enaddr[3] << 24);
   3004 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3005 		ral_hi |= RAL_AV;
   3006 	} else {
   3007 		ral_lo = 0;
   3008 		ral_hi = 0;
   3009 	}
   3010 
   3011 	if (sc->sc_type >= WM_T_82544) {
   3012 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3013 		    ral_lo);
   3014 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3015 		    ral_hi);
   3016 	} else {
   3017 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3018 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3019 	}
   3020 }
   3021 
   3022 /*
   3023  * wm_mchash:
   3024  *
   3025  *	Compute the hash of the multicast address for the 4096-bit
   3026  *	multicast filter.
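 *
 *	The hash is built from bytes 4 and 5 of the Ethernet address;
 *	sc_mchash_type selects the bit window used.  ICH/PCH devices
 *	have a smaller 1024-bit filter, hence the 10-bit hash there.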
   3027  */
   3028 static uint32_t
   3029 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3030 {
   3031 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3032 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3033 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3034 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3035 	uint32_t hash;
   3036 
   3037 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3038 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3039 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   3040 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3041 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3042 		return (hash & 0x3ff);
   3043 	}
   3044 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3045 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3046 
   3047 	return (hash & 0xfff);
   3048 }
   3049 
   3050 /*
   3051  * wm_set_filter:
   3052  *
   3053  *	Set up the receive filter.
   3054  */
   3055 static void
   3056 wm_set_filter(struct wm_softc *sc)
   3057 {
   3058 	struct ethercom *ec = &sc->sc_ethercom;
   3059 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3060 	struct ether_multi *enm;
   3061 	struct ether_multistep step;
   3062 	bus_addr_t mta_reg;
   3063 	uint32_t hash, reg, bit;
   3064 	int i, size;
   3065 
   3066 	if (sc->sc_type >= WM_T_82544)
   3067 		mta_reg = WMREG_CORDOVA_MTA;
   3068 	else
   3069 		mta_reg = WMREG_MTA;
   3070 
   3071 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3072 
   3073 	if (ifp->if_flags & IFF_BROADCAST)
   3074 		sc->sc_rctl |= RCTL_BAM;
   3075 	if (ifp->if_flags & IFF_PROMISC) {
   3076 		sc->sc_rctl |= RCTL_UPE;
   3077 		goto allmulti;
   3078 	}
   3079 
   3080 	/*
   3081 	 * Set the station address in the first RAL slot, and
   3082 	 * clear the remaining slots.
   3083 	 */
   3084 	if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3086 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3087 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3088 	    || (sc->sc_type == WM_T_PCH_LPT))
   3089 		size = WM_RAL_TABSIZE_ICH8;
   3090 	else if (sc->sc_type == WM_T_82575)
   3091 		size = WM_RAL_TABSIZE_82575;
   3092 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3093 		size = WM_RAL_TABSIZE_82576;
   3094 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3095 		size = WM_RAL_TABSIZE_I350;
   3096 	else
   3097 		size = WM_RAL_TABSIZE;
   3098 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3099 	for (i = 1; i < size; i++)
   3100 		wm_set_ral(sc, NULL, i);
   3101 
   3102 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3103 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3104 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   3105 		size = WM_ICH8_MC_TABSIZE;
   3106 	else
   3107 		size = WM_MC_TABSIZE;
   3108 	/* Clear out the multicast table. */
   3109 	for (i = 0; i < size; i++)
   3110 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3111 
   3112 	ETHER_FIRST_MULTI(step, ec, enm);
   3113 	while (enm != NULL) {
   3114 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3115 			/*
   3116 			 * We must listen to a range of multicast addresses.
   3117 			 * For now, just accept all multicasts, rather than
   3118 			 * trying to set only those filter bits needed to match
   3119 			 * the range.  (At this time, the only use of address
   3120 			 * ranges is for IP multicast routing, for which the
   3121 			 * range is big enough to require all bits set.)
   3122 			 */
   3123 			goto allmulti;
   3124 		}
   3125 
   3126 		hash = wm_mchash(sc, enm->enm_addrlo);
   3127 
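		/*
		 * The low 5 bits of the hash select a bit within a
		 * 32-bit MTA register; the remaining bits select the
		 * register itself.
		 */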
   3128 		reg = (hash >> 5);
   3129 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3130 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3131 		    || (sc->sc_type == WM_T_PCH2)
   3132 		    || (sc->sc_type == WM_T_PCH_LPT))
   3133 			reg &= 0x1f;
   3134 		else
   3135 			reg &= 0x7f;
   3136 		bit = hash & 0x1f;
   3137 
   3138 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3139 		hash |= 1U << bit;
   3140 
		/*
		 * XXX 82544 hardware bug workaround: a write to an odd
		 * MTA register apparently takes effect only if the
		 * preceding even register is rewritten as well, so save
		 * that register and write it back afterwards.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3143 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3144 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3145 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3146 		} else
   3147 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3148 
   3149 		ETHER_NEXT_MULTI(step, enm);
   3150 	}
   3151 
   3152 	ifp->if_flags &= ~IFF_ALLMULTI;
   3153 	goto setit;
   3154 
   3155  allmulti:
   3156 	ifp->if_flags |= IFF_ALLMULTI;
   3157 	sc->sc_rctl |= RCTL_MPE;
   3158 
   3159  setit:
   3160 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3161 }
   3162 
   3163 /* Reset and init related */
   3164 
   3165 static void
   3166 wm_set_vlan(struct wm_softc *sc)
   3167 {
   3168 	/* Deal with VLAN enables. */
   3169 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3170 		sc->sc_ctrl |= CTRL_VME;
   3171 	else
   3172 		sc->sc_ctrl &= ~CTRL_VME;
   3173 
   3174 	/* Write the control registers. */
   3175 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3176 }
   3177 
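/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is at its default of 0, set it
 *	to 10ms via GCR on pre-version-2 capability devices, or to
 *	16ms via the PCIe DCSR2 register on version-2 devices.
 *	Completion timeout resend is disabled in all cases.
 */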
   3178 static void
   3179 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3180 {
   3181 	uint32_t gcr;
   3182 	pcireg_t ctrl2;
   3183 
   3184 	gcr = CSR_READ(sc, WMREG_GCR);
   3185 
   3186 	/* Only take action if timeout value is defaulted to 0 */
   3187 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3188 		goto out;
   3189 
   3190 	if ((gcr & GCR_CAP_VER2) == 0) {
   3191 		gcr |= GCR_CMPL_TMOUT_10MS;
   3192 		goto out;
   3193 	}
   3194 
   3195 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3196 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3197 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3198 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3199 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3200 
   3201 out:
   3202 	/* Disable completion timeout resend */
   3203 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3204 
   3205 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3206 }
   3207 
   3208 void
   3209 wm_get_auto_rd_done(struct wm_softc *sc)
   3210 {
   3211 	int i;
   3212 
   3213 	/* wait for eeprom to reload */
   3214 	switch (sc->sc_type) {
   3215 	case WM_T_82571:
   3216 	case WM_T_82572:
   3217 	case WM_T_82573:
   3218 	case WM_T_82574:
   3219 	case WM_T_82583:
   3220 	case WM_T_82575:
   3221 	case WM_T_82576:
   3222 	case WM_T_82580:
   3223 	case WM_T_I350:
   3224 	case WM_T_I354:
   3225 	case WM_T_I210:
   3226 	case WM_T_I211:
   3227 	case WM_T_80003:
   3228 	case WM_T_ICH8:
   3229 	case WM_T_ICH9:
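		/* Poll EECD_EE_AUTORD for up to 10 ms (10 x 1 ms) */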
   3230 		for (i = 0; i < 10; i++) {
   3231 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3232 				break;
   3233 			delay(1000);
   3234 		}
   3235 		if (i == 10) {
   3236 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3237 			    "complete\n", device_xname(sc->sc_dev));
   3238 		}
   3239 		break;
   3240 	default:
   3241 		break;
   3242 	}
   3243 }
   3244 
   3245 void
   3246 wm_lan_init_done(struct wm_softc *sc)
   3247 {
   3248 	uint32_t reg = 0;
   3249 	int i;
   3250 
   3251 	/* wait for eeprom to reload */
   3252 	switch (sc->sc_type) {
   3253 	case WM_T_ICH10:
   3254 	case WM_T_PCH:
   3255 	case WM_T_PCH2:
   3256 	case WM_T_PCH_LPT:
   3257 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3258 			reg = CSR_READ(sc, WMREG_STATUS);
   3259 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3260 				break;
   3261 			delay(100);
   3262 		}
   3263 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3264 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3265 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3266 		}
   3267 		break;
   3268 	default:
   3269 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3270 		    __func__);
   3271 		break;
   3272 	}
   3273 
   3274 	reg &= ~STATUS_LAN_INIT_DONE;
   3275 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3276 }
   3277 
   3278 void
   3279 wm_get_cfg_done(struct wm_softc *sc)
   3280 {
   3281 	int mask;
   3282 	uint32_t reg;
   3283 	int i;
   3284 
   3285 	/* wait for eeprom to reload */
   3286 	switch (sc->sc_type) {
   3287 	case WM_T_82542_2_0:
   3288 	case WM_T_82542_2_1:
   3289 		/* null */
   3290 		break;
   3291 	case WM_T_82543:
   3292 	case WM_T_82544:
   3293 	case WM_T_82540:
   3294 	case WM_T_82545:
   3295 	case WM_T_82545_3:
   3296 	case WM_T_82546:
   3297 	case WM_T_82546_3:
   3298 	case WM_T_82541:
   3299 	case WM_T_82541_2:
   3300 	case WM_T_82547:
   3301 	case WM_T_82547_2:
   3302 	case WM_T_82573:
   3303 	case WM_T_82574:
   3304 	case WM_T_82583:
   3305 		/* generic */
   3306 		delay(10*1000);
   3307 		break;
   3308 	case WM_T_80003:
   3309 	case WM_T_82571:
   3310 	case WM_T_82572:
   3311 	case WM_T_82575:
   3312 	case WM_T_82576:
   3313 	case WM_T_82580:
   3314 	case WM_T_I350:
   3315 	case WM_T_I354:
   3316 	case WM_T_I210:
   3317 	case WM_T_I211:
   3318 		if (sc->sc_type == WM_T_82571) {
			/* Both 82571 ports share the CFGDONE_0 bit */
   3320 			mask = EEMNGCTL_CFGDONE_0;
   3321 		} else
   3322 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3323 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3324 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3325 				break;
   3326 			delay(1000);
   3327 		}
   3328 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3329 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3330 				device_xname(sc->sc_dev), __func__));
   3331 		}
   3332 		break;
   3333 	case WM_T_ICH8:
   3334 	case WM_T_ICH9:
   3335 	case WM_T_ICH10:
   3336 	case WM_T_PCH:
   3337 	case WM_T_PCH2:
   3338 	case WM_T_PCH_LPT:
   3339 		delay(10*1000);
   3340 		if (sc->sc_type >= WM_T_ICH10)
   3341 			wm_lan_init_done(sc);
   3342 		else
   3343 			wm_get_auto_rd_done(sc);
   3344 
   3345 		reg = CSR_READ(sc, WMREG_STATUS);
   3346 		if ((reg & STATUS_PHYRA) != 0)
   3347 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3348 		break;
   3349 	default:
   3350 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3351 		    __func__);
   3352 		break;
   3353 	}
   3354 }
   3355 
   3356 /* Init hardware bits */
   3357 void
   3358 wm_initialize_hardware_bits(struct wm_softc *sc)
   3359 {
   3360 	uint32_t tarc0, tarc1, reg;
   3361 
   3362 	/* For 82571 variant, 80003 and ICHs */
   3363 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3364 	    || (sc->sc_type >= WM_T_80003)) {
   3365 
   3366 		/* Transmit Descriptor Control 0 */
   3367 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3368 		reg |= TXDCTL_COUNT_DESC;
   3369 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3370 
   3371 		/* Transmit Descriptor Control 1 */
   3372 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3373 		reg |= TXDCTL_COUNT_DESC;
   3374 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3375 
   3376 		/* TARC0 */
   3377 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3378 		switch (sc->sc_type) {
   3379 		case WM_T_82571:
   3380 		case WM_T_82572:
   3381 		case WM_T_82573:
   3382 		case WM_T_82574:
   3383 		case WM_T_82583:
   3384 		case WM_T_80003:
   3385 			/* Clear bits 30..27 */
   3386 			tarc0 &= ~__BITS(30, 27);
   3387 			break;
   3388 		default:
   3389 			break;
   3390 		}
   3391 
   3392 		switch (sc->sc_type) {
   3393 		case WM_T_82571:
   3394 		case WM_T_82572:
   3395 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3396 
   3397 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3398 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3399 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3400 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3402 
   3403 			/* TARC1 bit 28 */
   3404 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3405 				tarc1 &= ~__BIT(28);
   3406 			else
   3407 				tarc1 |= __BIT(28);
   3408 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3409 
   3410 			/*
   3411 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   3413 			 */
   3414 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3415 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3416 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3417 			break;
   3418 		case WM_T_82573:
   3419 		case WM_T_82574:
   3420 		case WM_T_82583:
   3421 			if ((sc->sc_type == WM_T_82574)
   3422 			    || (sc->sc_type == WM_T_82583))
   3423 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3424 
   3425 			/* Extended Device Control */
   3426 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3427 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3428 			reg |= __BIT(22);	/* Set bit 22 */
   3429 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3430 
   3431 			/* Device Control */
   3432 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3433 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3434 
   3435 			/* PCIe Control Register */
   3436 			/*
   3437 			 * 82573 Errata (unknown).
   3438 			 *
   3439 			 * 82574 Errata 25 and 82583 Errata 12
   3440 			 * "Dropped Rx Packets":
			 *   NVM image version 2.1.4 and newer do not have
			 *   this bug.
   3442 			 */
   3443 			reg = CSR_READ(sc, WMREG_GCR);
   3444 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3445 			CSR_WRITE(sc, WMREG_GCR, reg);
   3446 
   3447 			if ((sc->sc_type == WM_T_82574)
   3448 			    || (sc->sc_type == WM_T_82583)) {
   3449 				/*
				 * The documentation says this bit must be
				 * set for proper operation.
   3452 				 */
   3453 				reg = CSR_READ(sc, WMREG_GCR);
   3454 				reg |= __BIT(22);
   3455 				CSR_WRITE(sc, WMREG_GCR, reg);
   3456 
   3457 				/*
				 * Apply a workaround for the hardware
				 * erratum documented in the errata sheets:
				 * unreliable PCIe completions can occur,
				 * particularly with ASPM enabled, and
				 * without the fix they can cause Tx
				 * timeouts.
   3464 				 */
   3465 				reg = CSR_READ(sc, WMREG_GCR2);
   3466 				reg |= __BIT(0);
   3467 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3468 			}
   3469 			break;
   3470 		case WM_T_80003:
   3471 			/* TARC0 */
   3472 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3473 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3475 
   3476 			/* TARC1 bit 28 */
   3477 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3478 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3479 				tarc1 &= ~__BIT(28);
   3480 			else
   3481 				tarc1 |= __BIT(28);
   3482 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3483 			break;
   3484 		case WM_T_ICH8:
   3485 		case WM_T_ICH9:
   3486 		case WM_T_ICH10:
   3487 		case WM_T_PCH:
   3488 		case WM_T_PCH2:
   3489 		case WM_T_PCH_LPT:
   3490 			/* TARC 0 */
   3491 			if (sc->sc_type == WM_T_ICH8) {
   3492 				/* Set TARC0 bits 29 and 28 */
   3493 				tarc0 |= __BITS(29, 28);
   3494 			}
   3495 			/* Set TARC0 bits 23,24,26,27 */
   3496 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3497 
   3498 			/* CTRL_EXT */
   3499 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3500 			reg |= __BIT(22);	/* Set bit 22 */
   3501 			/*
   3502 			 * Enable PHY low-power state when MAC is at D3
   3503 			 * w/o WoL
   3504 			 */
   3505 			if (sc->sc_type >= WM_T_PCH)
   3506 				reg |= CTRL_EXT_PHYPDEN;
   3507 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3508 
   3509 			/* TARC1 */
   3510 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3511 			/* bit 28 */
   3512 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3513 				tarc1 &= ~__BIT(28);
   3514 			else
   3515 				tarc1 |= __BIT(28);
   3516 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3517 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3518 
   3519 			/* Device Status */
   3520 			if (sc->sc_type == WM_T_ICH8) {
   3521 				reg = CSR_READ(sc, WMREG_STATUS);
   3522 				reg &= ~__BIT(31);
   3523 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3524 
   3525 			}
   3526 
   3527 			/*
			 * To work around a descriptor data corruption issue
			 * seen with NFSv2 UDP traffic, simply disable the
			 * NFS filtering capability.
   3531 			 */
   3532 			reg = CSR_READ(sc, WMREG_RFCTL);
   3533 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3534 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3535 			break;
   3536 		default:
   3537 			break;
   3538 		}
   3539 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3540 
   3541 		/*
   3542 		 * 8257[12] Errata No.52 and some others.
   3543 		 * Avoid RSS Hash Value bug.
   3544 		 */
   3545 		switch (sc->sc_type) {
   3546 		case WM_T_82571:
   3547 		case WM_T_82572:
   3548 		case WM_T_82573:
   3549 		case WM_T_80003:
   3550 		case WM_T_ICH8:
   3551 			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS
			    | WMREG_RFCTL_IPV6EXDIS;
   3553 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3554 			break;
   3555 		default:
   3556 			break;
   3557 		}
   3558 	}
   3559 }
   3560 
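/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate an 82580 RXPBS register value into a packet buffer
 *	size via wm_82580_rxpbs_table; out-of-range values map to 0.
 */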
   3561 static uint32_t
   3562 wm_rxpbs_adjust_82580(uint32_t val)
   3563 {
   3564 	uint32_t rv = 0;
   3565 
   3566 	if (val < __arraycount(wm_82580_rxpbs_table))
   3567 		rv = wm_82580_rxpbs_table[val];
   3568 
   3569 	return rv;
   3570 }
   3571 
   3572 /*
   3573  * wm_reset:
   3574  *
 *	Reset the chip to a known state.
   3576  */
   3577 static void
   3578 wm_reset(struct wm_softc *sc)
   3579 {
   3580 	struct wm_txqueue *txq = sc->sc_txq;
   3581 	int phy_reset = 0;
   3582 	int error = 0;
   3583 	uint32_t reg, mask;
   3584 
   3585 	/*
   3586 	 * Allocate on-chip memory according to the MTU size.
   3587 	 * The Packet Buffer Allocation register must be written
   3588 	 * before the chip is reset.
   3589 	 */
   3590 	switch (sc->sc_type) {
   3591 	case WM_T_82547:
   3592 	case WM_T_82547_2:
   3593 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3594 		    PBA_22K : PBA_30K;
   3595 		txq->txq_fifo_head = 0;
   3596 		txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3597 		txq->txq_fifo_size =
   3598 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3599 		txq->txq_fifo_stall = 0;
   3600 		break;
   3601 	case WM_T_82571:
   3602 	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3604 	case WM_T_80003:
   3605 		sc->sc_pba = PBA_32K;
   3606 		break;
   3607 	case WM_T_82573:
   3608 		sc->sc_pba = PBA_12K;
   3609 		break;
   3610 	case WM_T_82574:
   3611 	case WM_T_82583:
   3612 		sc->sc_pba = PBA_20K;
   3613 		break;
   3614 	case WM_T_82576:
   3615 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3616 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3617 		break;
   3618 	case WM_T_82580:
   3619 	case WM_T_I350:
   3620 	case WM_T_I354:
   3621 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3622 		break;
   3623 	case WM_T_I210:
   3624 	case WM_T_I211:
   3625 		sc->sc_pba = PBA_34K;
   3626 		break;
   3627 	case WM_T_ICH8:
   3628 		/* Workaround for a bit corruption issue in FIFO memory */
   3629 		sc->sc_pba = PBA_8K;
   3630 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3631 		break;
   3632 	case WM_T_ICH9:
   3633 	case WM_T_ICH10:
   3634 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3635 		    PBA_14K : PBA_10K;
   3636 		break;
   3637 	case WM_T_PCH:
   3638 	case WM_T_PCH2:
   3639 	case WM_T_PCH_LPT:
   3640 		sc->sc_pba = PBA_26K;
   3641 		break;
   3642 	default:
   3643 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3644 		    PBA_40K : PBA_48K;
   3645 		break;
   3646 	}
   3647 	/*
	 * Only old or non-multiqueue devices have the PBA register.
   3649 	 * XXX Need special handling for 82575.
   3650 	 */
   3651 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3652 	    || (sc->sc_type == WM_T_82575))
   3653 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3654 
   3655 	/* Prevent the PCI-E bus from sticking */
   3656 	if (sc->sc_flags & WM_F_PCIE) {
   3657 		int timeout = 800;
   3658 
   3659 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3660 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3661 
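		/* Wait up to 80 ms for GIO master requests to complete */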
   3662 		while (timeout--) {
   3663 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3664 			    == 0)
   3665 				break;
   3666 			delay(100);
   3667 		}
   3668 	}
   3669 
   3670 	/* Set the completion timeout for interface */
   3671 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3672 	    || (sc->sc_type == WM_T_82580)
   3673 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3674 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3675 		wm_set_pcie_completion_timeout(sc);
   3676 
   3677 	/* Clear interrupt */
   3678 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3679 	if (sc->sc_nintrs > 1) {
   3680 		if (sc->sc_type != WM_T_82574) {
   3681 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3682 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3683 		} else {
   3684 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3685 		}
   3686 	}
   3687 
   3688 	/* Stop the transmit and receive processes. */
   3689 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3690 	sc->sc_rctl &= ~RCTL_EN;
   3691 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3692 	CSR_WRITE_FLUSH(sc);
   3693 
   3694 	/* XXX set_tbi_sbp_82543() */
   3695 
   3696 	delay(10*1000);
   3697 
   3698 	/* Must acquire the MDIO ownership before MAC reset */
   3699 	switch (sc->sc_type) {
   3700 	case WM_T_82573:
   3701 	case WM_T_82574:
   3702 	case WM_T_82583:
   3703 		error = wm_get_hw_semaphore_82573(sc);
   3704 		break;
   3705 	default:
   3706 		break;
   3707 	}
   3708 
   3709 	/*
   3710 	 * 82541 Errata 29? & 82547 Errata 28?
	 * See also the description of the PHY_RST bit in the CTRL register
   3712 	 * in 8254x_GBe_SDM.pdf.
   3713 	 */
   3714 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3715 		CSR_WRITE(sc, WMREG_CTRL,
   3716 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3717 		CSR_WRITE_FLUSH(sc);
   3718 		delay(5000);
   3719 	}
   3720 
   3721 	switch (sc->sc_type) {
   3722 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3723 	case WM_T_82541:
   3724 	case WM_T_82541_2:
   3725 	case WM_T_82547:
   3726 	case WM_T_82547_2:
   3727 		/*
   3728 		 * On some chipsets, a reset through a memory-mapped write
   3729 		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes major headaches that can be
   3731 		 * avoided by issuing the reset via indirect register writes
   3732 		 * through I/O space.
   3733 		 *
   3734 		 * So, if we successfully mapped the I/O BAR at attach time,
   3735 		 * use that.  Otherwise, try our luck with a memory-mapped
   3736 		 * reset.
   3737 		 */
   3738 		if (sc->sc_flags & WM_F_IOH_VALID)
   3739 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3740 		else
   3741 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3742 		break;
   3743 	case WM_T_82545_3:
   3744 	case WM_T_82546_3:
   3745 		/* Use the shadow control register on these chips. */
   3746 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3747 		break;
   3748 	case WM_T_80003:
   3749 		mask = swfwphysem[sc->sc_funcid];
   3750 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3751 		wm_get_swfw_semaphore(sc, mask);
   3752 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3753 		wm_put_swfw_semaphore(sc, mask);
   3754 		break;
   3755 	case WM_T_ICH8:
   3756 	case WM_T_ICH9:
   3757 	case WM_T_ICH10:
   3758 	case WM_T_PCH:
   3759 	case WM_T_PCH2:
   3760 	case WM_T_PCH_LPT:
   3761 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3762 		if (wm_check_reset_block(sc) == 0) {
   3763 			/*
   3764 			 * Gate automatic PHY configuration by hardware on
   3765 			 * non-managed 82579
   3766 			 */
   3767 			if ((sc->sc_type == WM_T_PCH2)
   3768 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3769 				!= 0))
   3770 				wm_gate_hw_phy_config_ich8lan(sc, 1);
   3771 
   3773 			reg |= CTRL_PHY_RESET;
   3774 			phy_reset = 1;
   3775 		}
   3776 		wm_get_swfwhw_semaphore(sc);
   3777 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during reset; just delay */
   3779 		delay(20*1000);
   3780 		wm_put_swfwhw_semaphore(sc);
   3781 		break;
   3782 	case WM_T_82580:
   3783 	case WM_T_I350:
   3784 	case WM_T_I354:
   3785 	case WM_T_I210:
   3786 	case WM_T_I211:
   3787 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3788 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3789 			CSR_WRITE_FLUSH(sc);
   3790 		delay(5000);
   3791 		break;
   3792 	case WM_T_82542_2_0:
   3793 	case WM_T_82542_2_1:
   3794 	case WM_T_82543:
   3795 	case WM_T_82540:
   3796 	case WM_T_82545:
   3797 	case WM_T_82546:
   3798 	case WM_T_82571:
   3799 	case WM_T_82572:
   3800 	case WM_T_82573:
   3801 	case WM_T_82574:
   3802 	case WM_T_82575:
   3803 	case WM_T_82576:
   3804 	case WM_T_82583:
   3805 	default:
   3806 		/* Everything else can safely use the documented method. */
   3807 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3808 		break;
   3809 	}
   3810 
   3811 	/* Must release the MDIO ownership after MAC reset */
   3812 	switch (sc->sc_type) {
   3813 	case WM_T_82573:
   3814 	case WM_T_82574:
   3815 	case WM_T_82583:
   3816 		if (error == 0)
   3817 			wm_put_hw_semaphore_82573(sc);
   3818 		break;
   3819 	default:
   3820 		break;
   3821 	}
   3822 
   3823 	if (phy_reset != 0)
   3824 		wm_get_cfg_done(sc);
   3825 
   3826 	/* reload EEPROM */
   3827 	switch (sc->sc_type) {
   3828 	case WM_T_82542_2_0:
   3829 	case WM_T_82542_2_1:
   3830 	case WM_T_82543:
   3831 	case WM_T_82544:
   3832 		delay(10);
   3833 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3834 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3835 		CSR_WRITE_FLUSH(sc);
   3836 		delay(2000);
   3837 		break;
   3838 	case WM_T_82540:
   3839 	case WM_T_82545:
   3840 	case WM_T_82545_3:
   3841 	case WM_T_82546:
   3842 	case WM_T_82546_3:
   3843 		delay(5*1000);
   3844 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3845 		break;
   3846 	case WM_T_82541:
   3847 	case WM_T_82541_2:
   3848 	case WM_T_82547:
   3849 	case WM_T_82547_2:
   3850 		delay(20000);
   3851 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3852 		break;
   3853 	case WM_T_82571:
   3854 	case WM_T_82572:
   3855 	case WM_T_82573:
   3856 	case WM_T_82574:
   3857 	case WM_T_82583:
   3858 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3859 			delay(10);
   3860 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3861 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3862 			CSR_WRITE_FLUSH(sc);
   3863 		}
   3864 		/* check EECD_EE_AUTORD */
   3865 		wm_get_auto_rd_done(sc);
   3866 		/*
   3867 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   3868 		 * is set.
		 * PHY configuration from the NVM starts only after
		 * EECD_AUTO_RD is set.
   3871 		    || (sc->sc_type == WM_T_82583))
   3872 			delay(25*1000);
   3873 		break;
   3874 	case WM_T_82575:
   3875 	case WM_T_82576:
   3876 	case WM_T_82580:
   3877 	case WM_T_I350:
   3878 	case WM_T_I354:
   3879 	case WM_T_I210:
   3880 	case WM_T_I211:
   3881 	case WM_T_80003:
   3882 		/* check EECD_EE_AUTORD */
   3883 		wm_get_auto_rd_done(sc);
   3884 		break;
   3885 	case WM_T_ICH8:
   3886 	case WM_T_ICH9:
   3887 	case WM_T_ICH10:
   3888 	case WM_T_PCH:
   3889 	case WM_T_PCH2:
   3890 	case WM_T_PCH_LPT:
   3891 		break;
   3892 	default:
   3893 		panic("%s: unknown type\n", __func__);
   3894 	}
   3895 
   3896 	/* Check whether EEPROM is present or not */
   3897 	switch (sc->sc_type) {
   3898 	case WM_T_82575:
   3899 	case WM_T_82576:
   3900 	case WM_T_82580:
   3901 	case WM_T_I350:
   3902 	case WM_T_I354:
   3903 	case WM_T_ICH8:
   3904 	case WM_T_ICH9:
   3905 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3906 			/* Not found */
   3907 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3908 			if (sc->sc_type == WM_T_82575)
   3909 				wm_reset_init_script_82575(sc);
   3910 		}
   3911 		break;
   3912 	default:
   3913 		break;
   3914 	}
   3915 
   3916 	if ((sc->sc_type == WM_T_82580)
   3917 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3918 		/* clear global device reset status bit */
   3919 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3920 	}
   3921 
   3922 	/* Clear any pending interrupt events. */
   3923 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3924 	reg = CSR_READ(sc, WMREG_ICR);
   3925 	if (sc->sc_nintrs > 1) {
   3926 		if (sc->sc_type != WM_T_82574) {
   3927 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3928 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3929 		} else
   3930 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3931 	}
   3932 
   3933 	/* reload sc_ctrl */
   3934 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3935 
   3936 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   3937 		wm_set_eee_i350(sc);
   3938 
   3939 	/* dummy read from WUC */
   3940 	if (sc->sc_type == WM_T_PCH)
   3941 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3942 	/*
   3943 	 * For PCH, this write will make sure that any noise will be detected
   3944 	 * as a CRC error and be dropped rather than show up as a bad packet
   3945 	 * to the DMA engine
   3946 	 */
   3947 	if (sc->sc_type == WM_T_PCH)
   3948 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3949 
   3950 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3951 		CSR_WRITE(sc, WMREG_WUC, 0);
   3952 
   3953 	wm_reset_mdicnfg_82580(sc);
   3954 
   3955 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   3956 		wm_pll_workaround_i210(sc);
   3957 }
   3958 
   3959 /*
   3960  * wm_add_rxbuf:
   3961  *
 *	Add a receive buffer to the indicated descriptor.
   3963  */
   3964 static int
   3965 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   3966 {
   3967 	struct wm_softc *sc = rxq->rxq_sc;
   3968 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   3969 	struct mbuf *m;
   3970 	int error;
   3971 
   3972 	KASSERT(WM_RX_LOCKED(rxq));
   3973 
   3974 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3975 	if (m == NULL)
   3976 		return ENOBUFS;
   3977 
   3978 	MCLGET(m, M_DONTWAIT);
   3979 	if ((m->m_flags & M_EXT) == 0) {
   3980 		m_freem(m);
   3981 		return ENOBUFS;
   3982 	}
   3983 
   3984 	if (rxs->rxs_mbuf != NULL)
   3985 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3986 
   3987 	rxs->rxs_mbuf = m;
   3988 
   3989 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3990 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   3991 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   3992 	if (error) {
   3993 		/* XXX XXX XXX */
   3994 		aprint_error_dev(sc->sc_dev,
   3995 		    "unable to load rx DMA map %d, error = %d\n",
   3996 		    idx, error);
   3997 		panic("wm_add_rxbuf");
   3998 	}
   3999 
   4000 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4001 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4002 
   4003 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4004 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4005 			wm_init_rxdesc(rxq, idx);
   4006 	} else
   4007 		wm_init_rxdesc(rxq, idx);
   4008 
   4009 	return 0;
   4010 }
   4011 
   4012 /*
   4013  * wm_rxdrain:
   4014  *
   4015  *	Drain the receive queue.
   4016  */
   4017 static void
   4018 wm_rxdrain(struct wm_rxqueue *rxq)
   4019 {
   4020 	struct wm_softc *sc = rxq->rxq_sc;
   4021 	struct wm_rxsoft *rxs;
   4022 	int i;
   4023 
   4024 	KASSERT(WM_RX_LOCKED(rxq));
   4025 
   4026 	for (i = 0; i < WM_NRXDESC; i++) {
   4027 		rxs = &rxq->rxq_soft[i];
   4028 		if (rxs->rxs_mbuf != NULL) {
   4029 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4030 			m_freem(rxs->rxs_mbuf);
   4031 			rxs->rxs_mbuf = NULL;
   4032 		}
   4033 	}
   4034 }
   4035 
   4036 
   4037 #ifdef WM_MSI_MSIX
   4038 /*
 * Both single-interrupt MSI and INTx can use this function.
   4040  */
   4041 static int
   4042 wm_setup_legacy(struct wm_softc *sc)
   4043 {
   4044 	pci_chipset_tag_t pc = sc->sc_pc;
   4045 	const char *intrstr = NULL;
   4046 	char intrbuf[PCI_INTRSTR_LEN];
   4047 
   4048 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4049 	    sizeof(intrbuf));
   4050 #ifdef WM_MPSAFE
   4051 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4052 #endif
   4053 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4054 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4055 	if (sc->sc_ihs[0] == NULL) {
   4056 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4057 		    (pci_intr_type(sc->sc_intrs[0])
   4058 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4059 		return ENOMEM;
   4060 	}
   4061 
   4062 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4063 	sc->sc_nintrs = 1;
   4064 	return 0;
   4065 }
   4066 
   4067 struct _msix_matrix {
   4068 	const char *intrname;
   4069 	int(*func)(void *);
   4070 	int intridx;
   4071 	int cpuid;
   4072 } msix_matrix[WM_MSIX_NINTR] = {
   4073 	{ "TX", wm_txintr_msix, WM_MSIX_TXINTR_IDX, WM_MSIX_TXINTR_CPUID },
   4074 	{ "RX", wm_rxintr_msix, WM_MSIX_RXINTR_IDX, WM_MSIX_RXINTR_CPUID },
   4075 	{ "LINK", wm_linkintr_msix, WM_MSIX_LINKINTR_IDX,
   4076 	  WM_MSIX_LINKINTR_CPUID },
   4077 };
   4078 
   4079 static int
   4080 wm_setup_msix(struct wm_softc *sc)
   4081 {
   4082 	void *vih;
   4083 	kcpuset_t *affinity;
   4084 	int i, error;
   4085 	pci_chipset_tag_t pc = sc->sc_pc;
   4086 	const char *intrstr = NULL;
   4087 	char intrbuf[PCI_INTRSTR_LEN];
   4088 	char intr_xname[INTRDEVNAMEBUF];
   4089 
   4090 	kcpuset_create(&affinity, false);
   4091 
   4092 	for (i = 0; i < WM_MSIX_NINTR; i++) {
   4093 		intrstr = pci_intr_string(pc,
   4094 		    sc->sc_intrs[msix_matrix[i].intridx], intrbuf,
   4095 		    sizeof(intrbuf));
   4096 #ifdef WM_MPSAFE
   4097 		pci_intr_setattr(pc,
   4098 		    &sc->sc_intrs[msix_matrix[i].intridx],
   4099 		    PCI_INTR_MPSAFE, true);
   4100 #endif
   4101 		memset(intr_xname, 0, sizeof(intr_xname));
   4102 		strlcat(intr_xname, device_xname(sc->sc_dev),
   4103 		    sizeof(intr_xname));
   4104 		strlcat(intr_xname, msix_matrix[i].intrname,
   4105 		    sizeof(intr_xname));
   4106 		vih = pci_intr_establish_xname(pc,
   4107 		    sc->sc_intrs[msix_matrix[i].intridx], IPL_NET,
   4108 		    msix_matrix[i].func, sc, intr_xname);
   4109 		if (vih == NULL) {
   4110 			aprint_error_dev(sc->sc_dev,
			    "unable to establish MSI-X (for %s)%s%s\n",
   4112 			    msix_matrix[i].intrname,
   4113 			    intrstr ? " at " : "",
   4114 			    intrstr ? intrstr : "");
   4115 			kcpuset_destroy(affinity);
   4116 
   4117 			return ENOMEM;
   4118 		}
   4119 		kcpuset_zero(affinity);
   4120 		/* Round-robin affinity */
   4121 		kcpuset_set(affinity, msix_matrix[i].cpuid % ncpu);
   4122 		error = interrupt_distribute(vih, affinity, NULL);
   4123 		if (error == 0) {
   4124 			aprint_normal_dev(sc->sc_dev,
   4125 			    "for %s interrupting at %s affinity to %u\n",
   4126 			    msix_matrix[i].intrname, intrstr,
   4127 			    msix_matrix[i].cpuid % ncpu);
   4128 		} else {
   4129 			aprint_normal_dev(sc->sc_dev,
   4130 			    "for %s interrupting at %s\n",
   4131 			    msix_matrix[i].intrname, intrstr);
   4132 		}
   4133 		sc->sc_ihs[msix_matrix[i].intridx] = vih;
   4134 	}
   4135 
   4136 	sc->sc_nintrs = WM_MSIX_NINTR;
   4137 	kcpuset_destroy(affinity);
   4138 	return 0;
   4139 }
   4140 #endif
   4141 
   4142 /*
   4143  * wm_init:		[ifnet interface function]
   4144  *
   4145  *	Initialize the interface.
   4146  */
   4147 static int
   4148 wm_init(struct ifnet *ifp)
   4149 {
   4150 	struct wm_softc *sc = ifp->if_softc;
   4151 	int ret;
   4152 
   4153 	WM_CORE_LOCK(sc);
   4154 	ret = wm_init_locked(ifp);
   4155 	WM_CORE_UNLOCK(sc);
   4156 
   4157 	return ret;
   4158 }
   4159 
   4160 static int
   4161 wm_init_locked(struct ifnet *ifp)
   4162 {
   4163 	struct wm_softc *sc = ifp->if_softc;
   4164 	int i, j, trynum, error = 0;
   4165 	uint32_t reg;
   4166 
   4167 	KASSERT(WM_CORE_LOCKED(sc));
   4168 	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
   4172 	 * on such platforms.  One possibility is that the DMA itself is
   4173 	 * slightly more efficient if the front of the entire packet (instead
   4174 	 * of the front of the headers) is aligned.
   4175 	 *
   4176 	 * Note we must always set align_tweak to 0 if we are using
   4177 	 * jumbo frames.
   4178 	 */
   4179 #ifdef __NO_STRICT_ALIGNMENT
   4180 	sc->sc_align_tweak = 0;
   4181 #else
   4182 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4183 		sc->sc_align_tweak = 0;
   4184 	else
   4185 		sc->sc_align_tweak = 2;
   4186 #endif /* __NO_STRICT_ALIGNMENT */
   4187 
   4188 	/* Cancel any pending I/O. */
   4189 	wm_stop_locked(ifp, 0);
   4190 
   4191 	/* update statistics before reset */
   4192 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4193 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4194 
   4195 	/* Reset the chip to a known state. */
   4196 	wm_reset(sc);
   4197 
   4198 	switch (sc->sc_type) {
   4199 	case WM_T_82571:
   4200 	case WM_T_82572:
   4201 	case WM_T_82573:
   4202 	case WM_T_82574:
   4203 	case WM_T_82583:
   4204 	case WM_T_80003:
   4205 	case WM_T_ICH8:
   4206 	case WM_T_ICH9:
   4207 	case WM_T_ICH10:
   4208 	case WM_T_PCH:
   4209 	case WM_T_PCH2:
   4210 	case WM_T_PCH_LPT:
   4211 		if (wm_check_mng_mode(sc) != 0)
   4212 			wm_get_hw_control(sc);
   4213 		break;
   4214 	default:
   4215 		break;
   4216 	}
   4217 
   4218 	/* Init hardware bits */
   4219 	wm_initialize_hardware_bits(sc);
   4220 
   4221 	/* Reset the PHY. */
   4222 	if (sc->sc_flags & WM_F_HAS_MII)
   4223 		wm_gmii_reset(sc);
   4224 
   4225 	/* Calculate (E)ITR value */
   4226 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4227 		sc->sc_itr = 450;	/* For EITR */
   4228 	} else if (sc->sc_type >= WM_T_82543) {
   4229 		/*
   4230 		 * Set up the interrupt throttling register (units of 256ns)
   4231 		 * Note that a footnote in Intel's documentation says this
   4232 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode.  Empirically, this also appears to be true
		 * for the 1024ns units of the other interrupt-related timer
		 * registers -- so, really, we ought
   4236 		 * to divide this value by 4 when the link speed is low.
   4237 		 *
   4238 		 * XXX implement this division at link speed change!
   4239 		 */
   4240 
   4241 		/*
   4242 		 * For N interrupts/sec, set this value to:
   4243 		 * 1000000000 / (N * 256).  Note that we set the
   4244 		 * absolute and packet timer values to this value
   4245 		 * divided by 4 to get "simple timer" behavior.
   4246 		 */
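		 *
		 * With the value of 1500 used below, that works out to
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec.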
   4247 
   4248 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4249 	}
   4250 
   4251 	error = wm_init_txrx_queues(sc);
   4252 	if (error)
   4253 		goto out;
   4254 
   4255 	/*
   4256 	 * Clear out the VLAN table -- we don't use it (yet).
   4257 	 */
   4258 	CSR_WRITE(sc, WMREG_VET, 0);
   4259 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4260 		trynum = 10; /* Due to hw errata */
   4261 	else
   4262 		trynum = 1;
   4263 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4264 		for (j = 0; j < trynum; j++)
   4265 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4266 
   4267 	/*
   4268 	 * Set up flow-control parameters.
   4269 	 *
   4270 	 * XXX Values could probably stand some tuning.
   4271 	 */
   4272 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4273 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4274 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   4275 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4276 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4277 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4278 	}
   4279 
   4280 	sc->sc_fcrtl = FCRTL_DFLT;
   4281 	if (sc->sc_type < WM_T_82543) {
   4282 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4283 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4284 	} else {
   4285 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4286 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4287 	}
   4288 
   4289 	if (sc->sc_type == WM_T_80003)
   4290 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4291 	else
   4292 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4293 
   4294 	/* Writes the control register. */
   4295 	wm_set_vlan(sc);
   4296 
   4297 	if (sc->sc_flags & WM_F_HAS_MII) {
   4298 		int val;
   4299 
   4300 		switch (sc->sc_type) {
   4301 		case WM_T_80003:
   4302 		case WM_T_ICH8:
   4303 		case WM_T_ICH9:
   4304 		case WM_T_ICH10:
   4305 		case WM_T_PCH:
   4306 		case WM_T_PCH2:
   4307 		case WM_T_PCH_LPT:
   4308 			/*
   4309 			 * Set the mac to wait the maximum time between each
   4310 			 * iteration and increase the max iterations when
   4311 			 * polling the phy; this fixes erroneous timeouts at
   4312 			 * 10Mbps.
   4313 			 */
   4314 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4315 			    0xFFFF);
   4316 			val = wm_kmrn_readreg(sc,
   4317 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   4318 			val |= 0x3F;
   4319 			wm_kmrn_writereg(sc,
   4320 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4321 			break;
   4322 		default:
   4323 			break;
   4324 		}
   4325 
   4326 		if (sc->sc_type == WM_T_80003) {
   4327 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4328 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4329 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4330 
   4331 			/* Bypass RX and TX FIFO's */
   4332 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4333 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4334 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4335 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4336 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4337 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4338 		}
   4339 	}
   4340 #if 0
   4341 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4342 #endif
   4343 
   4344 	/* Set up checksum offload parameters. */
   4345 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4346 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4347 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4348 		reg |= RXCSUM_IPOFL;
   4349 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4350 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4351 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4352 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4353 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4354 
   4355 	/* Set up MSI-X */
   4356 	if (sc->sc_nintrs > 1) {
   4357 		uint32_t ivar;
   4358 
   4359 		if (sc->sc_type == WM_T_82575) {
   4360 			/* Interrupt control */
   4361 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4362 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4363 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4364 
   4365 			/* TX */
   4366 			CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_TXINTR_IDX),
   4367 			    EITR_TX_QUEUE(0));
   4368 			/* RX */
   4369 			CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_RXINTR_IDX),
   4370 			    EITR_RX_QUEUE(0));
   4371 			/* Link status */
   4372 			CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_LINKINTR_IDX),
   4373 			    EITR_OTHER);
   4374 		} else if (sc->sc_type == WM_T_82574) {
   4375 			/* Interrupt control */
   4376 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4377 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4378 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4379 
   4380 			/* TX, RX and Link status */
   4381 			ivar = __SHIFTIN((IVAR_VALID_82574|WM_MSIX_TXINTR_IDX),
   4382 			    IVAR_TX_MASK_Q_82574(0));
   4383 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4384 				| WM_MSIX_RXINTR_IDX),
   4385 			    IVAR_RX_MASK_Q_82574(0));
			ivar |= __SHIFTIN((IVAR_VALID_82574
				| WM_MSIX_LINKINTR_IDX),
			    IVAR_OTHER_MASK);
   4388 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4389 		} else {
   4390 			/* Interrupt control */
   4391 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
   4392 			    | GPIE_MULTI_MSIX | GPIE_EIAME
   4393 			    | GPIE_PBA);
   4394 
   4395 			switch (sc->sc_type) {
   4396 			case WM_T_82580:
   4397 			case WM_T_I350:
   4398 			case WM_T_I354:
   4399 			case WM_T_I210:
   4400 			case WM_T_I211:
   4401 				/* TX */
   4402 				ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
   4403 				ivar &= ~IVAR_TX_MASK_Q(0);
   4404 				ivar |= __SHIFTIN(
   4405 					(WM_MSIX_TXINTR_IDX | IVAR_VALID),
   4406 					IVAR_TX_MASK_Q(0));
   4407 				CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
   4408 
   4409 				/* RX */
   4410 				ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
   4411 				ivar &= ~IVAR_RX_MASK_Q(0);
   4412 				ivar |= __SHIFTIN(
   4413 					(WM_MSIX_RXINTR_IDX | IVAR_VALID),
   4414 					IVAR_RX_MASK_Q(0));
   4415 				CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
   4416 				break;
   4417 			case WM_T_82576:
   4418 				/* TX */
   4419 				ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
   4420 				ivar &= ~IVAR_TX_MASK_Q_82576(0);
   4421 				ivar |= __SHIFTIN(
   4422 					(WM_MSIX_TXINTR_IDX | IVAR_VALID),
   4423 					IVAR_TX_MASK_Q_82576(0));
   4424 				CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
   4425 
   4426 				/* RX */
   4427 				ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
   4428 				ivar &= ~IVAR_RX_MASK_Q_82576(0);
   4429 				ivar |= __SHIFTIN(
   4430 					(WM_MSIX_RXINTR_IDX | IVAR_VALID),
   4431 					IVAR_RX_MASK_Q_82576(0));
   4432 				CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
   4433 				break;
   4434 			default:
   4435 				break;
   4436 			}
   4437 
   4438 			/* Link status */
   4439 			ivar = __SHIFTIN((WM_MSIX_LINKINTR_IDX | IVAR_VALID),
   4440 			    IVAR_MISC_OTHER);
   4441 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4442 		}
   4443 	}
   4444 
   4445 	/* Set up the interrupt registers. */
   4446 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4447 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4448 	    ICR_RXO | ICR_RXT0;
   4449 	if (sc->sc_nintrs > 1) {
   4450 		uint32_t mask;
   4451 		switch (sc->sc_type) {
   4452 		case WM_T_82574:
   4453 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4454 			    WMREG_EIAC_82574_MSIX_MASK);
   4455 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4456 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4457 			break;
   4458 		default:
   4459 			if (sc->sc_type == WM_T_82575)
				mask = EITR_RX_QUEUE(0) | EITR_TX_QUEUE(0)
   4461 				    | EITR_OTHER;
   4462 			else
   4463 				mask = (1 << WM_MSIX_RXINTR_IDX)
   4464 				    | (1 << WM_MSIX_TXINTR_IDX)
   4465 				    | (1 << WM_MSIX_LINKINTR_IDX);
   4466 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4467 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4468 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4469 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4470 			break;
   4471 		}
   4472 	} else
   4473 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4474 
   4475 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4476 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4477 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   4478 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4479 		reg |= KABGTXD_BGSQLBIAS;
   4480 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4481 	}
   4482 
   4483 	/* Set up the inter-packet gap. */
   4484 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4485 
   4486 	if (sc->sc_type >= WM_T_82543) {
   4487 		/*
		 * XXX The 82574 has both ITR and EITR.  Set EITR when we
		 * use the multiqueue function with MSI-X.
   4490 		 */
   4491 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4492 			if (sc->sc_nintrs > 1) {
   4493 				CSR_WRITE(sc, WMREG_EITR(WM_MSIX_RXINTR_IDX),
   4494 				    sc->sc_itr);
   4495 				CSR_WRITE(sc, WMREG_EITR(WM_MSIX_TXINTR_IDX),
   4496 				    sc->sc_itr);
   4497 				/*
				 * Link interrupts occur much less frequently
				 * than TX and RX interrupts, so we don't tune
				 * the EITR(WM_MSIX_LINKINTR_IDX) value the way
				 * FreeBSD's if_igb does.
   4502 				 */
   4503 			} else
   4504 				CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
   4505 		} else
   4506 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4507 	}
   4508 
   4509 	/* Set the VLAN ethernetype. */
	/* Set the VLAN ethertype. */
   4511 
   4512 	/*
   4513 	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
   4515 	 * we resolve the media type.
   4516 	 */
   4517 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4518 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4519 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4520 	if (sc->sc_type >= WM_T_82571)
   4521 		sc->sc_tctl |= TCTL_MULR;
   4522 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4523 
   4524 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4525 		/* Write TDT after TCTL.EN is set. See the document. */
   4526 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4527 	}
   4528 
   4529 	if (sc->sc_type == WM_T_80003) {
   4530 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4531 		reg &= ~TCTL_EXT_GCEX_MASK;
   4532 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4533 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4534 	}
   4535 
   4536 	/* Set the media. */
   4537 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4538 		goto out;
   4539 
   4540 	/* Configure for OS presence */
   4541 	wm_init_manageability(sc);
   4542 
   4543 	/*
   4544 	 * Set up the receive control register; we actually program
   4545 	 * the register when we set the receive filter.  Use multicast
   4546 	 * address offset type 0.
   4547 	 *
   4548 	 * Only the i82544 has the ability to strip the incoming
   4549 	 * CRC, so we don't enable that feature.
   4550 	 */
   4551 	sc->sc_mchash_type = 0;
   4552 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4553 	    | RCTL_MO(sc->sc_mchash_type);
   4554 
   4555 	/*
   4556 	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   4558 	 */
   4559 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4560 	    || (sc->sc_type == WM_T_I210))
   4561 		sc->sc_rctl |= RCTL_SECRC;
   4562 
   4563 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4564 	    && (ifp->if_mtu > ETHERMTU)) {
   4565 		sc->sc_rctl |= RCTL_LPE;
   4566 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4567 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4568 	}
   4569 
   4570 	if (MCLBYTES == 2048) {
   4571 		sc->sc_rctl |= RCTL_2k;
   4572 	} else {
   4573 		if (sc->sc_type >= WM_T_82543) {
   4574 			switch (MCLBYTES) {
   4575 			case 4096:
   4576 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4577 				break;
   4578 			case 8192:
   4579 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4580 				break;
   4581 			case 16384:
   4582 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4583 				break;
   4584 			default:
   4585 				panic("wm_init: MCLBYTES %d unsupported",
   4586 				    MCLBYTES);
   4587 				break;
   4588 			}
		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
   4590 	}
   4591 
   4592 	/* Set the receive filter. */
   4593 	wm_set_filter(sc);
   4594 
   4595 	/* Enable ECC */
   4596 	switch (sc->sc_type) {
   4597 	case WM_T_82571:
   4598 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4599 		reg |= PBA_ECC_CORR_EN;
   4600 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4601 		break;
   4602 	case WM_T_PCH_LPT:
   4603 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4604 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4605 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4606 
   4607 		reg = CSR_READ(sc, WMREG_CTRL);
   4608 		reg |= CTRL_MEHE;
   4609 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4610 		break;
   4611 	default:
   4612 		break;
   4613 	}
   4614 
	/* On the 82575 and later, set RDT only if RX is enabled */
   4616 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4617 		struct wm_rxqueue *rxq = sc->sc_rxq;
   4618 		WM_RX_LOCK(rxq);
   4619 		for (i = 0; i < WM_NRXDESC; i++)
   4620 			wm_init_rxdesc(rxq, i);
   4621 		WM_RX_UNLOCK(rxq);
   4622 	}
   4623 
   4624 	sc->sc_stopping = false;
   4625 
   4626 	/* Start the one second link check clock. */
   4627 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4628 
   4629 	/* ...all done! */
   4630 	ifp->if_flags |= IFF_RUNNING;
   4631 	ifp->if_flags &= ~IFF_OACTIVE;
   4632 
   4633  out:
   4634 	sc->sc_if_flags = ifp->if_flags;
   4635 	if (error)
   4636 		log(LOG_ERR, "%s: interface not running\n",
   4637 		    device_xname(sc->sc_dev));
   4638 	return error;
   4639 }
   4640 
   4641 /*
   4642  * wm_stop:		[ifnet interface function]
   4643  *
   4644  *	Stop transmission on the interface.
   4645  */
   4646 static void
   4647 wm_stop(struct ifnet *ifp, int disable)
   4648 {
   4649 	struct wm_softc *sc = ifp->if_softc;
   4650 
   4651 	WM_CORE_LOCK(sc);
   4652 	wm_stop_locked(ifp, disable);
   4653 	WM_CORE_UNLOCK(sc);
   4654 }
   4655 
   4656 static void
   4657 wm_stop_locked(struct ifnet *ifp, int disable)
   4658 {
   4659 	struct wm_softc *sc = ifp->if_softc;
   4660 	struct wm_txqueue *txq = sc->sc_txq;
   4661 	struct wm_rxqueue *rxq = sc->sc_rxq;
   4662 	struct wm_txsoft *txs;
   4663 	int i;
   4664 
   4665 	KASSERT(WM_CORE_LOCKED(sc));
   4666 
   4667 	sc->sc_stopping = true;
   4668 
   4669 	/* Stop the one second clock. */
   4670 	callout_stop(&sc->sc_tick_ch);
   4671 
   4672 	/* Stop the 82547 Tx FIFO stall check timer. */
   4673 	if (sc->sc_type == WM_T_82547)
   4674 		callout_stop(&sc->sc_txfifo_ch);
   4675 
   4676 	if (sc->sc_flags & WM_F_HAS_MII) {
   4677 		/* Down the MII. */
   4678 		mii_down(&sc->sc_mii);
   4679 	} else {
   4680 #if 0
   4681 		/* Should we clear PHY's status properly? */
   4682 		wm_reset(sc);
   4683 #endif
   4684 	}
   4685 
   4686 	/* Stop the transmit and receive processes. */
   4687 	CSR_WRITE(sc, WMREG_TCTL, 0);
   4688 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4689 	sc->sc_rctl &= ~RCTL_EN;
   4690 
   4691 	/*
   4692 	 * Clear the interrupt mask to ensure the device cannot assert its
   4693 	 * interrupt line.
   4694 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   4695 	 * service any currently pending or shared interrupt.
   4696 	 */
   4697 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4698 	sc->sc_icr = 0;
   4699 	if (sc->sc_nintrs > 1) {
   4700 		if (sc->sc_type != WM_T_82574) {
   4701 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4702 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4703 		} else
   4704 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4705 	}
   4706 
   4707 	/* Release any queued transmit buffers. */
   4708 	WM_TX_LOCK(txq);
   4709 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   4710 		txs = &txq->txq_soft[i];
   4711 		if (txs->txs_mbuf != NULL) {
   4712 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   4713 			m_freem(txs->txs_mbuf);
   4714 			txs->txs_mbuf = NULL;
   4715 		}
   4716 	}
   4717 	WM_TX_UNLOCK(txq);
   4718 
   4719 	/* Mark the interface as down and cancel the watchdog timer. */
   4720 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   4721 	ifp->if_timer = 0;
   4722 
   4723 	if (disable) {
   4724 		WM_RX_LOCK(rxq);
   4725 		wm_rxdrain(rxq);
   4726 		WM_RX_UNLOCK(rxq);
   4727 	}
   4728 
   4729 #if 0 /* notyet */
   4730 	if (sc->sc_type >= WM_T_82544)
   4731 		CSR_WRITE(sc, WMREG_WUC, 0);
   4732 #endif
   4733 }
   4734 
   4735 /*
   4736  * wm_tx_offload:
   4737  *
   4738  *	Set up TCP/IP checksumming parameters for the
   4739  *	specified packet.
   4740  */
   4741 static int
   4742 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   4743     uint8_t *fieldsp)
   4744 {
   4745 	struct wm_txqueue *txq = sc->sc_txq;
   4746 	struct mbuf *m0 = txs->txs_mbuf;
   4747 	struct livengood_tcpip_ctxdesc *t;
   4748 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   4749 	uint32_t ipcse;
   4750 	struct ether_header *eh;
   4751 	int offset, iphl;
   4752 	uint8_t fields;
   4753 
   4754 	/*
   4755 	 * XXX It would be nice if the mbuf pkthdr had offset
   4756 	 * fields for the protocol headers.
   4757 	 */
   4758 
   4759 	eh = mtod(m0, struct ether_header *);
   4760 	switch (htons(eh->ether_type)) {
   4761 	case ETHERTYPE_IP:
   4762 	case ETHERTYPE_IPV6:
   4763 		offset = ETHER_HDR_LEN;
   4764 		break;
   4765 
   4766 	case ETHERTYPE_VLAN:
   4767 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4768 		break;
   4769 
   4770 	default:
   4771 		/*
   4772 		 * Don't support this protocol or encapsulation.
   4773 		 */
   4774 		*fieldsp = 0;
   4775 		*cmdp = 0;
   4776 		return 0;
   4777 	}
   4778 
   4779 	if ((m0->m_pkthdr.csum_flags &
   4780 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   4781 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4782 	} else {
   4783 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4784 	}
   4785 	ipcse = offset + iphl - 1;
   4786 
   4787 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   4788 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   4789 	seg = 0;
   4790 	fields = 0;
   4791 
   4792 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4793 		int hlen = offset + iphl;
   4794 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4795 
   4796 		if (__predict_false(m0->m_len <
   4797 				    (hlen + sizeof(struct tcphdr)))) {
   4798 			/*
   4799 			 * TCP/IP headers are not in the first mbuf; we need
   4800 			 * to do this the slow and painful way.  Let's just
   4801 			 * hope this doesn't happen very often.
   4802 			 */
   4803 			struct tcphdr th;
   4804 
   4805 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4806 
   4807 			m_copydata(m0, hlen, sizeof(th), &th);
   4808 			if (v4) {
   4809 				struct ip ip;
   4810 
   4811 				m_copydata(m0, offset, sizeof(ip), &ip);
   4812 				ip.ip_len = 0;
   4813 				m_copyback(m0,
   4814 				    offset + offsetof(struct ip, ip_len),
   4815 				    sizeof(ip.ip_len), &ip.ip_len);
   4816 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4817 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4818 			} else {
   4819 				struct ip6_hdr ip6;
   4820 
   4821 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4822 				ip6.ip6_plen = 0;
   4823 				m_copyback(m0,
   4824 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4825 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4826 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4827 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4828 			}
   4829 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4830 			    sizeof(th.th_sum), &th.th_sum);
   4831 
   4832 			hlen += th.th_off << 2;
   4833 		} else {
   4834 			/*
   4835 			 * TCP/IP headers are in the first mbuf; we can do
   4836 			 * this the easy way.
   4837 			 */
   4838 			struct tcphdr *th;
   4839 
   4840 			if (v4) {
   4841 				struct ip *ip =
   4842 				    (void *)(mtod(m0, char *) + offset);
   4843 				th = (void *)(mtod(m0, char *) + hlen);
   4844 
   4845 				ip->ip_len = 0;
   4846 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4847 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4848 			} else {
   4849 				struct ip6_hdr *ip6 =
   4850 				    (void *)(mtod(m0, char *) + offset);
   4851 				th = (void *)(mtod(m0, char *) + hlen);
   4852 
   4853 				ip6->ip6_plen = 0;
   4854 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4855 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4856 			}
   4857 			hlen += th->th_off << 2;
   4858 		}
   4859 
   4860 		if (v4) {
   4861 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4862 			cmdlen |= WTX_TCPIP_CMD_IP;
   4863 		} else {
   4864 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4865 			ipcse = 0;
   4866 		}
   4867 		cmd |= WTX_TCPIP_CMD_TSE;
   4868 		cmdlen |= WTX_TCPIP_CMD_TSE |
   4869 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   4870 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   4871 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   4872 	}
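         	/*
         	 * Illustrative numbers (a sketch, not from the manual): a
         	 * plain IPv4/TCP TSO send with option-less headers has
         	 * hlen = 14 + 20 + 20 = 54, so cmdlen carries the whole TCP
         	 * payload length and seg programs HDRLEN = 54 plus the MSS.
         	 */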
   4873 
   4874 	/*
   4875 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   4876 	 * offload feature, if we load the context descriptor, we
   4877 	 * MUST provide valid values for IPCSS and TUCSS fields.
   4878 	 */
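         	/*
         	 * Worked example (illustrative only), for an untagged
         	 * IPv4/TCP frame with a 20-byte IP header: offset = 14, so
         	 * IPCSS = 14, IPCSO = 14 + 10 = 24 (ip_sum) and IPCSE = 33;
         	 * after "offset += iphl" below, TUCSS = 34 and
         	 * TUCSO = 34 + 16 = 50 (th_sum), with TUCSE = 0 meaning
         	 * "to the end of the packet".
         	 */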
   4879 
   4880 	ipcs = WTX_TCPIP_IPCSS(offset) |
   4881 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   4882 	    WTX_TCPIP_IPCSE(ipcse);
   4883 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   4884 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   4885 		fields |= WTX_IXSM;
   4886 	}
   4887 
   4888 	offset += iphl;
   4889 
   4890 	if (m0->m_pkthdr.csum_flags &
   4891 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   4892 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4893 		fields |= WTX_TXSM;
   4894 		tucs = WTX_TCPIP_TUCSS(offset) |
   4895 		    WTX_TCPIP_TUCSO(offset +
   4896 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   4897 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4898 	} else if ((m0->m_pkthdr.csum_flags &
   4899 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   4900 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4901 		fields |= WTX_TXSM;
   4902 		tucs = WTX_TCPIP_TUCSS(offset) |
   4903 		    WTX_TCPIP_TUCSO(offset +
   4904 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   4905 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4906 	} else {
   4907 		/* Just initialize it to a valid TCP context. */
   4908 		tucs = WTX_TCPIP_TUCSS(offset) |
   4909 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   4910 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4911 	}
   4912 
   4913 	/* Fill in the context descriptor. */
   4914 	t = (struct livengood_tcpip_ctxdesc *)
   4915 	    &txq->txq_descs[txq->txq_next];
   4916 	t->tcpip_ipcs = htole32(ipcs);
   4917 	t->tcpip_tucs = htole32(tucs);
   4918 	t->tcpip_cmdlen = htole32(cmdlen);
   4919 	t->tcpip_seg = htole32(seg);
   4920 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   4921 
   4922 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4923 	txs->txs_ndesc++;
   4924 
   4925 	*cmdp = cmd;
   4926 	*fieldsp = fields;
   4927 
   4928 	return 0;
   4929 }
   4930 
   4931 static void
   4932 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   4933 {
   4934 	struct mbuf *m;
   4935 	int i;
   4936 
   4937 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   4938 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   4939 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   4940 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   4941 		    m->m_data, m->m_len, m->m_flags);
   4942 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   4943 	    i, i == 1 ? "" : "s");
   4944 }
   4945 
   4946 /*
   4947  * wm_82547_txfifo_stall:
   4948  *
   4949  *	Callout used to wait for the 82547 Tx FIFO to drain,
   4950  *	reset the FIFO pointers, and restart packet transmission.
   4951  */
   4952 static void
   4953 wm_82547_txfifo_stall(void *arg)
   4954 {
   4955 	struct wm_softc *sc = arg;
   4956 	struct wm_txqueue *txq = sc->sc_txq;
   4957 #ifndef WM_MPSAFE
   4958 	int s;
   4959 
   4960 	s = splnet();
   4961 #endif
   4962 	WM_TX_LOCK(txq);
   4963 
   4964 	if (sc->sc_stopping)
   4965 		goto out;
   4966 
   4967 	if (txq->txq_fifo_stall) {
   4968 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   4969 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   4970 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   4971 			/*
   4972 			 * Packets have drained.  Stop transmitter, reset
   4973 			 * FIFO pointers, restart transmitter, and kick
   4974 			 * the packet queue.
   4975 			 */
   4976 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   4977 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   4978 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   4979 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   4980 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   4981 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   4982 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   4983 			CSR_WRITE_FLUSH(sc);
   4984 
   4985 			txq->txq_fifo_head = 0;
   4986 			txq->txq_fifo_stall = 0;
   4987 			wm_start_locked(&sc->sc_ethercom.ec_if);
   4988 		} else {
   4989 			/*
   4990 			 * Still waiting for packets to drain; try again in
   4991 			 * another tick.
   4992 			 */
   4993 			callout_schedule(&sc->sc_txfifo_ch, 1);
   4994 		}
   4995 	}
   4996 
   4997 out:
   4998 	WM_TX_UNLOCK(txq);
   4999 #ifndef WM_MPSAFE
   5000 	splx(s);
   5001 #endif
   5002 }
   5003 
   5004 /*
   5005  * wm_82547_txfifo_bugchk:
   5006  *
   5007  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5008  *	prevent enqueueing a packet that would wrap around the end
    5009  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5010  *
   5011  *	We do this by checking the amount of space before the end
   5012  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5013  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5014  *	the internal FIFO pointers to the beginning, and restart
   5015  *	transmission on the interface.
   5016  */
   5017 #define	WM_FIFO_HDR		0x10
   5018 #define	WM_82547_PAD_LEN	0x3e0
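         /*
          * Illustrative numbers (a sketch, assuming an 8 KB Tx packet FIFO):
          * with txq_fifo_head at 0x1e00 there are 0x200 bytes of space left;
          * a 1514-byte frame rounds up to len = 0x600, and because
          * 0x600 >= 0x3e0 + 0x200 we stall until the FIFO drains.
          */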
   5019 static int
   5020 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5021 {
   5022 	struct wm_txqueue *txq = sc->sc_txq;
   5023 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5024 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5025 
   5026 	/* Just return if already stalled. */
   5027 	if (txq->txq_fifo_stall)
   5028 		return 1;
   5029 
   5030 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5031 		/* Stall only occurs in half-duplex mode. */
   5032 		goto send_packet;
   5033 	}
   5034 
   5035 	if (len >= WM_82547_PAD_LEN + space) {
   5036 		txq->txq_fifo_stall = 1;
   5037 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5038 		return 1;
   5039 	}
   5040 
   5041  send_packet:
   5042 	txq->txq_fifo_head += len;
   5043 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5044 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5045 
   5046 	return 0;
   5047 }
   5048 
   5049 static int
   5050 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5051 {
   5052 	int error;
   5053 
   5054 	/*
   5055 	 * Allocate the control data structures, and create and load the
   5056 	 * DMA map for it.
   5057 	 *
   5058 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5059 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5060 	 * both sets within the same 4G segment.
   5061 	 */
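         	/*
         	 * The 4G constraint is enforced via the boundary argument
         	 * (0x100000000) to bus_dmamem_alloc() below, which prevents
         	 * the allocation from crossing a 4 GB boundary.
         	 */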
   5062 	if (sc->sc_type < WM_T_82544) {
   5063 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5064 		txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
   5065 	} else {
   5066 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5067 		txq->txq_desc_size = sizeof(txdescs_t);
   5068 	}
   5069 
   5070 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size, PAGE_SIZE,
   5071 		    (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 1,
   5072 		    &txq->txq_desc_rseg, 0)) != 0) {
   5073 		aprint_error_dev(sc->sc_dev,
   5074 		    "unable to allocate TX control data, error = %d\n",
   5075 		    error);
   5076 		goto fail_0;
   5077 	}
   5078 
   5079 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5080 		    txq->txq_desc_rseg, txq->txq_desc_size,
   5081 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5082 		aprint_error_dev(sc->sc_dev,
   5083 		    "unable to map TX control data, error = %d\n", error);
   5084 		goto fail_1;
   5085 	}
   5086 
   5087 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
   5088 		    txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5089 		aprint_error_dev(sc->sc_dev,
   5090 		    "unable to create TX control data DMA map, error = %d\n",
   5091 		    error);
   5092 		goto fail_2;
   5093 	}
   5094 
   5095 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5096 		    txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
   5097 		aprint_error_dev(sc->sc_dev,
   5098 		    "unable to load TX control data DMA map, error = %d\n",
   5099 		    error);
   5100 		goto fail_3;
   5101 	}
   5102 
   5103 	return 0;
   5104 
   5105  fail_3:
   5106 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5107  fail_2:
   5108 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5109 	    txq->txq_desc_size);
   5110  fail_1:
   5111 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5112  fail_0:
   5113 	return error;
   5114 }
   5115 
   5116 static void
   5117 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5118 {
   5119 
   5120 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5121 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5122 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5123 	    txq->txq_desc_size);
   5124 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5125 }
   5126 
   5127 static int
   5128 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5129 {
   5130 	int error;
   5131 
   5132 	/*
   5133 	 * Allocate the control data structures, and create and load the
   5134 	 * DMA map for it.
   5135 	 *
   5136 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5137 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5138 	 * both sets within the same 4G segment.
   5139 	 */
   5140 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5141 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE,
   5142 		    (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1,
   5143 		    &rxq->rxq_desc_rseg, 0)) != 0) {
   5144 		aprint_error_dev(sc->sc_dev,
   5145 		    "unable to allocate RX control data, error = %d\n",
   5146 		    error);
   5147 		goto fail_0;
   5148 	}
   5149 
   5150 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5151 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5152 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5153 		aprint_error_dev(sc->sc_dev,
   5154 		    "unable to map RX control data, error = %d\n", error);
   5155 		goto fail_1;
   5156 	}
   5157 
   5158 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5159 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5160 		aprint_error_dev(sc->sc_dev,
   5161 		    "unable to create RX control data DMA map, error = %d\n",
   5162 		    error);
   5163 		goto fail_2;
   5164 	}
   5165 
   5166 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5167 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5168 		aprint_error_dev(sc->sc_dev,
   5169 		    "unable to load RX control data DMA map, error = %d\n",
   5170 		    error);
   5171 		goto fail_3;
   5172 	}
   5173 
   5174 	return 0;
   5175 
   5176  fail_3:
   5177 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5178  fail_2:
   5179 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5180 	    rxq->rxq_desc_size);
   5181  fail_1:
   5182 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5183  fail_0:
   5184 	return error;
   5185 }
   5186 
   5187 static void
   5188 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5189 {
   5190 
   5191 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5192 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5193 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5194 	    rxq->rxq_desc_size);
   5195 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5196 }
   5197 
   5198 
   5199 static int
   5200 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5201 {
   5202 	int i, error;
   5203 
   5204 	/* Create the transmit buffer DMA maps. */
   5205 	WM_TXQUEUELEN(txq) =
   5206 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5207 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5208 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5209 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5210 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5211 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5212 			aprint_error_dev(sc->sc_dev,
   5213 			    "unable to create Tx DMA map %d, error = %d\n",
   5214 			    i, error);
   5215 			goto fail;
   5216 		}
   5217 	}
   5218 
   5219 	return 0;
   5220 
   5221  fail:
   5222 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5223 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5224 			bus_dmamap_destroy(sc->sc_dmat,
   5225 			    txq->txq_soft[i].txs_dmamap);
   5226 	}
   5227 	return error;
   5228 }
   5229 
   5230 static void
   5231 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5232 {
   5233 	int i;
   5234 
   5235 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5236 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5237 			bus_dmamap_destroy(sc->sc_dmat,
   5238 			    txq->txq_soft[i].txs_dmamap);
   5239 	}
   5240 }
   5241 
   5242 static int
   5243 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5244 {
   5245 	int i, error;
   5246 
   5247 	/* Create the receive buffer DMA maps. */
   5248 	for (i = 0; i < WM_NRXDESC; i++) {
   5249 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5250 			    MCLBYTES, 0, 0,
   5251 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5252 			aprint_error_dev(sc->sc_dev,
   5253 			    "unable to create Rx DMA map %d error = %d\n",
   5254 			    i, error);
   5255 			goto fail;
   5256 		}
   5257 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5258 	}
   5259 
   5260 	return 0;
   5261 
   5262  fail:
   5263 	for (i = 0; i < WM_NRXDESC; i++) {
   5264 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5265 			bus_dmamap_destroy(sc->sc_dmat,
   5266 			    rxq->rxq_soft[i].rxs_dmamap);
   5267 	}
   5268 	return error;
   5269 }
   5270 
   5271 static void
   5272 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5273 {
   5274 	int i;
   5275 
   5276 	for (i = 0; i < WM_NRXDESC; i++) {
   5277 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5278 			bus_dmamap_destroy(sc->sc_dmat,
   5279 			    rxq->rxq_soft[i].rxs_dmamap);
   5280 	}
   5281 }
   5282 
   5283 /*
    5284  * wm_alloc_txrx_queues:
   5285  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5286  */
   5287 static int
   5288 wm_alloc_txrx_queues(struct wm_softc *sc)
   5289 {
   5290 	int error;
   5291 	struct wm_txqueue *txq;
   5292 	struct wm_rxqueue *rxq;
   5293 
   5294 	/*
   5295 	 * For transmission
   5296 	 */
   5297 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
   5298 	    KM_SLEEP);
   5299 	if (sc->sc_txq == NULL) {
   5300 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
   5301 		error = ENOMEM;
   5302 		goto fail_0;
   5303 	}
   5304 	txq = sc->sc_txq;
   5305 	txq->txq_sc = sc;
    5306 #ifdef WM_MPSAFE
    5307 	txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    5308 #else
    5309 	txq->txq_lock = NULL;
    5310 #endif
    5311 
    5312 	error = wm_alloc_tx_descs(sc, txq);
   5313 	if (error)
   5314 		goto fail_1;
   5315 
   5316 	error = wm_alloc_tx_buffer(sc, txq);
   5317 	if (error)
   5318 		goto fail_2;
   5319 
   5320 	/*
    5321 	 * For receive
   5322 	 */
   5323 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
   5324 	    KM_SLEEP);
   5325 	if (sc->sc_rxq == NULL) {
   5326 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
   5327 		error = ENOMEM;
   5328 		goto fail_3;
   5329 	}
   5330 	rxq = sc->sc_rxq;
   5331 	rxq->rxq_sc = sc;
    5332 #ifdef WM_MPSAFE
    5333 	rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    5334 #else
    5335 	rxq->rxq_lock = NULL;
    5336 #endif
    5337 
    5338 	error = wm_alloc_rx_descs(sc, rxq);
   5339 	if (error)
   5340 		goto fail_4;
   5341 
   5342 	error = wm_alloc_rx_buffer(sc, rxq);
   5343 	if (error)
   5344 		goto fail_5;
   5345 
   5346 	return 0;
   5347 
   5348  fail_5:
   5349 	wm_free_rx_descs(sc, rxq);
   5350  fail_4:
   5351 	if (rxq->rxq_lock)
   5352 		mutex_obj_free(rxq->rxq_lock);
   5353 	kmem_free(rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5354  fail_3:
   5355 	wm_free_tx_buffer(sc, txq);
   5356  fail_2:
   5357 	wm_free_tx_descs(sc, txq);
   5358  fail_1:
   5359 	if (txq->txq_lock)
   5360 		mutex_obj_free(txq->txq_lock);
   5361 	kmem_free(txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5362  fail_0:
   5363 	return error;
   5364 }
   5365 
   5366 /*
    5367  * wm_free_txrx_queues:
   5368  *	Free {tx,rx}descs and {tx,rx} buffers
   5369  */
   5370 static void
   5371 wm_free_txrx_queues(struct wm_softc *sc)
   5372 {
   5373 	struct wm_txqueue *txq = sc->sc_txq;
   5374 	struct wm_rxqueue *rxq = sc->sc_rxq;
   5375 
   5376 	wm_free_rx_buffer(sc, rxq);
   5377 	wm_free_rx_descs(sc, rxq);
   5378 	if (rxq->rxq_lock)
   5379 		mutex_obj_free(rxq->rxq_lock);
   5380 	kmem_free(rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5381 
   5382 	wm_free_tx_buffer(sc, txq);
   5383 	wm_free_tx_descs(sc, txq);
   5384 	if (txq->txq_lock)
   5385 		mutex_obj_free(txq->txq_lock);
   5386 	kmem_free(txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5387 }
   5388 
   5389 static void
   5390 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5391 {
   5392 
   5393 	KASSERT(WM_TX_LOCKED(txq));
   5394 
   5395 	/* Initialize the transmit descriptor ring. */
   5396 	memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
   5397 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5398 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5399 	txq->txq_free = WM_NTXDESC(txq);
   5400 	txq->txq_next = 0;
   5401 }
   5402 
   5403 static void
   5404 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
   5405 {
   5406 
   5407 	KASSERT(WM_TX_LOCKED(txq));
   5408 
   5409 	if (sc->sc_type < WM_T_82543) {
   5410 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5411 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5412 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
   5413 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5414 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5415 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5416 	} else {
   5417 		CSR_WRITE(sc, WMREG_TDBAH(0), WM_CDTXADDR_HI(txq, 0));
   5418 		CSR_WRITE(sc, WMREG_TDBAL(0), WM_CDTXADDR_LO(txq, 0));
   5419 		CSR_WRITE(sc, WMREG_TDLEN(0), WM_TXDESCSIZE(txq));
   5420 		CSR_WRITE(sc, WMREG_TDH(0), 0);
   5421 
   5422 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5423 			/*
   5424 			 * Don't write TDT before TCTL.EN is set.
   5425 			 * See the document.
   5426 			 */
   5427 			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
   5428 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5429 			    | TXDCTL_WTHRESH(0));
   5430 		else {
   5431 			/* ITR / 4 */
   5432 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5433 			if (sc->sc_type >= WM_T_82540) {
    5434 				/* Should be the same as TIDV */
   5435 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5436 			}
   5437 
   5438 			CSR_WRITE(sc, WMREG_TDT(0), 0);
   5439 			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
   5440 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5441 		}
   5442 	}
   5443 }
   5444 
   5445 static void
   5446 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5447 {
   5448 	int i;
   5449 
   5450 	KASSERT(WM_TX_LOCKED(txq));
   5451 
   5452 	/* Initialize the transmit job descriptors. */
   5453 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5454 		txq->txq_soft[i].txs_mbuf = NULL;
   5455 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5456 	txq->txq_snext = 0;
   5457 	txq->txq_sdirty = 0;
   5458 }
   5459 
   5460 static void
   5461 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
   5462 {
   5463 
   5464 	KASSERT(WM_TX_LOCKED(txq));
   5465 
   5466 	/*
   5467 	 * Set up some register offsets that are different between
   5468 	 * the i82542 and the i82543 and later chips.
   5469 	 */
   5470 	if (sc->sc_type < WM_T_82543) {
   5471 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5472 	} else {
   5473 		txq->txq_tdt_reg = WMREG_TDT(0);
   5474 	}
   5475 
   5476 	wm_init_tx_descs(sc, txq);
   5477 	wm_init_tx_regs(sc, txq);
   5478 	wm_init_tx_buffer(sc, txq);
   5479 }
   5480 
   5481 static void
   5482 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5483 {
   5484 
   5485 	KASSERT(WM_RX_LOCKED(rxq));
   5486 
   5487 	/*
   5488 	 * Initialize the receive descriptor and receive job
   5489 	 * descriptor rings.
   5490 	 */
   5491 	if (sc->sc_type < WM_T_82543) {
   5492 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5493 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5494 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5495 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5496 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5497 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5498 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5499 
   5500 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5501 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5502 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5503 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5504 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5505 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5506 	} else {
   5507 		CSR_WRITE(sc, WMREG_RDBAH(0), WM_CDRXADDR_HI(rxq, 0));
   5508 		CSR_WRITE(sc, WMREG_RDBAL(0), WM_CDRXADDR_LO(rxq, 0));
   5509 		CSR_WRITE(sc, WMREG_RDLEN(0),
   5510 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5511 
   5512 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5513 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   5514 				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
   5515 			CSR_WRITE(sc, WMREG_SRRCTL(0), SRRCTL_DESCTYPE_LEGACY
   5516 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
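         			/*
         			 * BSIZEPKT is in 1 KB units (hence the shift);
         			 * e.g. the default 2 KB MCLBYTES programs a
         			 * value of 2.  The panic above rejects cluster
         			 * sizes that are not a 1 KB multiple.
         			 */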
   5517 			CSR_WRITE(sc, WMREG_RXDCTL(0), RXDCTL_QUEUE_ENABLE
   5518 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5519 			    | RXDCTL_WTHRESH(1));
   5520 		} else {
   5521 			CSR_WRITE(sc, WMREG_RDH(0), 0);
   5522 			CSR_WRITE(sc, WMREG_RDT(0), 0);
   5523 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
    5524 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same as RDTR */
   5525 			CSR_WRITE(sc, WMREG_RXDCTL(0), RXDCTL_PTHRESH(0) |
   5526 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5527 		}
   5528 	}
   5529 }
   5530 
   5531 static int
   5532 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5533 {
   5534 	struct wm_rxsoft *rxs;
   5535 	int error, i;
   5536 
   5537 	KASSERT(WM_RX_LOCKED(rxq));
   5538 
   5539 	for (i = 0; i < WM_NRXDESC; i++) {
   5540 		rxs = &rxq->rxq_soft[i];
   5541 		if (rxs->rxs_mbuf == NULL) {
   5542 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5543 				log(LOG_ERR, "%s: unable to allocate or map "
   5544 				    "rx buffer %d, error = %d\n",
   5545 				    device_xname(sc->sc_dev), i, error);
   5546 				/*
   5547 				 * XXX Should attempt to run with fewer receive
   5548 				 * XXX buffers instead of just failing.
   5549 				 */
   5550 				wm_rxdrain(rxq);
   5551 				return ENOMEM;
   5552 			}
   5553 		} else {
   5554 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5555 				wm_init_rxdesc(rxq, i);
   5556 			/*
   5557 			 * For 82575 and newer device, the RX descriptors
   5558 			 * must be initialized after the setting of RCTL.EN in
   5559 			 * wm_set_filter()
   5560 			 */
   5561 		}
   5562 	}
   5563 	rxq->rxq_ptr = 0;
   5564 	rxq->rxq_discard = 0;
   5565 	WM_RXCHAIN_RESET(rxq);
   5566 
   5567 	return 0;
   5568 }
   5569 
   5570 static int
   5571 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5572 {
   5573 
   5574 	KASSERT(WM_RX_LOCKED(rxq));
   5575 
   5576 	/*
   5577 	 * Set up some register offsets that are different between
   5578 	 * the i82542 and the i82543 and later chips.
   5579 	 */
   5580 	if (sc->sc_type < WM_T_82543) {
   5581 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5582 	} else {
   5583 		rxq->rxq_rdt_reg = WMREG_RDT(0);
   5584 	}
   5585 
   5586 	wm_init_rx_regs(sc, rxq);
   5587 	return wm_init_rx_buffer(sc, rxq);
   5588 }
   5589 
   5590 /*
    5591  * wm_init_txrx_queues:
   5592  *	Initialize {tx,rx}descs and {tx,rx} buffers
   5593  */
   5594 static int
   5595 wm_init_txrx_queues(struct wm_softc *sc)
   5596 {
   5597 	struct wm_txqueue *txq = sc->sc_txq;
   5598 	struct wm_rxqueue *rxq = sc->sc_rxq;
   5599 	int error;
   5600 
   5601 	WM_TX_LOCK(txq);
   5602 	wm_init_tx_queue(sc, txq);
   5603 	WM_TX_UNLOCK(txq);
   5604 
   5605 	WM_RX_LOCK(rxq);
   5606 	error = wm_init_rx_queue(sc, rxq);
   5607 	WM_RX_UNLOCK(rxq);
   5608 
   5609 	return error;
   5610 }
   5611 
   5612 /*
   5613  * wm_start:		[ifnet interface function]
   5614  *
   5615  *	Start packet transmission on the interface.
   5616  */
   5617 static void
   5618 wm_start(struct ifnet *ifp)
   5619 {
   5620 	struct wm_softc *sc = ifp->if_softc;
   5621 	struct wm_txqueue *txq = sc->sc_txq;
   5622 
   5623 	WM_TX_LOCK(txq);
   5624 	if (!sc->sc_stopping)
   5625 		wm_start_locked(ifp);
   5626 	WM_TX_UNLOCK(txq);
   5627 }
   5628 
   5629 static void
   5630 wm_start_locked(struct ifnet *ifp)
   5631 {
   5632 	struct wm_softc *sc = ifp->if_softc;
   5633 	struct wm_txqueue *txq = sc->sc_txq;
   5634 	struct mbuf *m0;
   5635 	struct m_tag *mtag;
   5636 	struct wm_txsoft *txs;
   5637 	bus_dmamap_t dmamap;
   5638 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   5639 	bus_addr_t curaddr;
   5640 	bus_size_t seglen, curlen;
   5641 	uint32_t cksumcmd;
   5642 	uint8_t cksumfields;
   5643 
   5644 	KASSERT(WM_TX_LOCKED(txq));
   5645 
   5646 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   5647 		return;
   5648 
   5649 	/* Remember the previous number of free descriptors. */
   5650 	ofree = txq->txq_free;
   5651 
   5652 	/*
   5653 	 * Loop through the send queue, setting up transmit descriptors
   5654 	 * until we drain the queue, or use up all available transmit
   5655 	 * descriptors.
   5656 	 */
   5657 	for (;;) {
   5658 		m0 = NULL;
   5659 
   5660 		/* Get a work queue entry. */
   5661 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   5662 			wm_txeof(sc);
   5663 			if (txq->txq_sfree == 0) {
   5664 				DPRINTF(WM_DEBUG_TX,
   5665 				    ("%s: TX: no free job descriptors\n",
   5666 					device_xname(sc->sc_dev)));
   5667 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   5668 				break;
   5669 			}
   5670 		}
   5671 
   5672 		/* Grab a packet off the queue. */
   5673 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   5674 		if (m0 == NULL)
   5675 			break;
   5676 
   5677 		DPRINTF(WM_DEBUG_TX,
   5678 		    ("%s: TX: have packet to transmit: %p\n",
   5679 		    device_xname(sc->sc_dev), m0));
   5680 
   5681 		txs = &txq->txq_soft[txq->txq_snext];
   5682 		dmamap = txs->txs_dmamap;
   5683 
   5684 		use_tso = (m0->m_pkthdr.csum_flags &
   5685 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   5686 
   5687 		/*
   5688 		 * So says the Linux driver:
   5689 		 * The controller does a simple calculation to make sure
   5690 		 * there is enough room in the FIFO before initiating the
   5691 		 * DMA for each buffer.  The calc is:
   5692 		 *	4 = ceil(buffer len / MSS)
   5693 		 * To make sure we don't overrun the FIFO, adjust the max
   5694 		 * buffer len if the MSS drops.
   5695 		 */
   5696 		dmamap->dm_maxsegsz =
   5697 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   5698 		    ? m0->m_pkthdr.segsz << 2
   5699 		    : WTX_MAX_LEN;
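         		/*
         		 * For example, a TSO packet with an MSS of 1448 is
         		 * loaded with dm_maxsegsz = 5792 (assuming that is
         		 * below WTX_MAX_LEN), i.e. four segments' worth of
         		 * payload per DMA buffer.
         		 */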
   5700 
   5701 		/*
   5702 		 * Load the DMA map.  If this fails, the packet either
   5703 		 * didn't fit in the allotted number of segments, or we
   5704 		 * were short on resources.  For the too-many-segments
   5705 		 * case, we simply report an error and drop the packet,
   5706 		 * since we can't sanely copy a jumbo packet to a single
   5707 		 * buffer.
   5708 		 */
   5709 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   5710 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   5711 		if (error) {
   5712 			if (error == EFBIG) {
   5713 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5714 				log(LOG_ERR, "%s: Tx packet consumes too many "
   5715 				    "DMA segments, dropping...\n",
   5716 				    device_xname(sc->sc_dev));
   5717 				wm_dump_mbuf_chain(sc, m0);
   5718 				m_freem(m0);
   5719 				continue;
   5720 			}
    5721 			/* Short on resources, just stop for now. */
   5722 			DPRINTF(WM_DEBUG_TX,
   5723 			    ("%s: TX: dmamap load failed: %d\n",
   5724 			    device_xname(sc->sc_dev), error));
   5725 			break;
   5726 		}
   5727 
   5728 		segs_needed = dmamap->dm_nsegs;
   5729 		if (use_tso) {
   5730 			/* For sentinel descriptor; see below. */
   5731 			segs_needed++;
   5732 		}
   5733 
   5734 		/*
   5735 		 * Ensure we have enough descriptors free to describe
   5736 		 * the packet.  Note, we always reserve one descriptor
   5737 		 * at the end of the ring due to the semantics of the
   5738 		 * TDT register, plus one more in the event we need
   5739 		 * to load offload context.
   5740 		 */
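         		/*
         		 * E.g. with txq_free == 10, a packet needing 9 or more
         		 * descriptors is deferred until completed transmissions
         		 * free more slots.
         		 */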
   5741 		if (segs_needed > txq->txq_free - 2) {
   5742 			/*
   5743 			 * Not enough free descriptors to transmit this
   5744 			 * packet.  We haven't committed anything yet,
   5745 			 * so just unload the DMA map, put the packet
   5746 			 * pack on the queue, and punt.  Notify the upper
   5747 			 * layer that there are no more slots left.
   5748 			 */
   5749 			DPRINTF(WM_DEBUG_TX,
   5750 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   5751 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
    5752 			    segs_needed, txq->txq_free - 1));
   5753 			ifp->if_flags |= IFF_OACTIVE;
   5754 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   5755 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   5756 			break;
   5757 		}
   5758 
   5759 		/*
   5760 		 * Check for 82547 Tx FIFO bug.  We need to do this
   5761 		 * once we know we can transmit the packet, since we
   5762 		 * do some internal FIFO space accounting here.
   5763 		 */
   5764 		if (sc->sc_type == WM_T_82547 &&
   5765 		    wm_82547_txfifo_bugchk(sc, m0)) {
   5766 			DPRINTF(WM_DEBUG_TX,
   5767 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   5768 			    device_xname(sc->sc_dev)));
   5769 			ifp->if_flags |= IFF_OACTIVE;
   5770 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   5771 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   5772 			break;
   5773 		}
   5774 
   5775 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   5776 
   5777 		DPRINTF(WM_DEBUG_TX,
   5778 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   5779 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   5780 
   5781 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   5782 
   5783 		/*
   5784 		 * Store a pointer to the packet so that we can free it
   5785 		 * later.
   5786 		 *
   5787 		 * Initially, we consider the number of descriptors the
    5788 		 * packet uses to be the number of DMA segments.  This may be
   5789 		 * incremented by 1 if we do checksum offload (a descriptor
   5790 		 * is used to set the checksum context).
   5791 		 */
   5792 		txs->txs_mbuf = m0;
   5793 		txs->txs_firstdesc = txq->txq_next;
   5794 		txs->txs_ndesc = segs_needed;
   5795 
   5796 		/* Set up offload parameters for this packet. */
   5797 		if (m0->m_pkthdr.csum_flags &
   5798 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   5799 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   5800 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   5801 			if (wm_tx_offload(sc, txs, &cksumcmd,
   5802 					  &cksumfields) != 0) {
   5803 				/* Error message already displayed. */
   5804 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   5805 				continue;
   5806 			}
   5807 		} else {
   5808 			cksumcmd = 0;
   5809 			cksumfields = 0;
   5810 		}
   5811 
   5812 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   5813 
   5814 		/* Sync the DMA map. */
   5815 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   5816 		    BUS_DMASYNC_PREWRITE);
   5817 
   5818 		/* Initialize the transmit descriptor. */
   5819 		for (nexttx = txq->txq_next, seg = 0;
   5820 		     seg < dmamap->dm_nsegs; seg++) {
   5821 			for (seglen = dmamap->dm_segs[seg].ds_len,
   5822 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   5823 			     seglen != 0;
   5824 			     curaddr += curlen, seglen -= curlen,
   5825 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   5826 				curlen = seglen;
   5827 
   5828 				/*
   5829 				 * So says the Linux driver:
   5830 				 * Work around for premature descriptor
   5831 				 * write-backs in TSO mode.  Append a
   5832 				 * 4-byte sentinel descriptor.
   5833 				 */
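         				/*
         				 * When curlen is trimmed below, 4
         				 * bytes remain in seglen, so the loop
         				 * runs one extra pass and emits the
         				 * 4-byte sentinel reserved by the
         				 * segs_needed++ above.
         				 */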
   5834 				if (use_tso &&
   5835 				    seg == dmamap->dm_nsegs - 1 &&
   5836 				    curlen > 8)
   5837 					curlen -= 4;
   5838 
   5839 				wm_set_dma_addr(
   5840 				    &txq->txq_descs[nexttx].wtx_addr,
   5841 				    curaddr);
   5842 				txq->txq_descs[nexttx].wtx_cmdlen =
   5843 				    htole32(cksumcmd | curlen);
   5844 				txq->txq_descs[nexttx].wtx_fields.wtxu_status =
   5845 				    0;
   5846 				txq->txq_descs[nexttx].wtx_fields.wtxu_options =
   5847 				    cksumfields;
   5848 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   5849 				lasttx = nexttx;
   5850 
   5851 				DPRINTF(WM_DEBUG_TX,
   5852 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   5853 				     "len %#04zx\n",
   5854 				    device_xname(sc->sc_dev), nexttx,
   5855 				    (uint64_t)curaddr, curlen));
   5856 			}
   5857 		}
   5858 
   5859 		KASSERT(lasttx != -1);
   5860 
   5861 		/*
   5862 		 * Set up the command byte on the last descriptor of
   5863 		 * the packet.  If we're in the interrupt delay window,
   5864 		 * delay the interrupt.
   5865 		 */
   5866 		txq->txq_descs[lasttx].wtx_cmdlen |=
   5867 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   5868 
   5869 		/*
   5870 		 * If VLANs are enabled and the packet has a VLAN tag, set
   5871 		 * up the descriptor to encapsulate the packet for us.
   5872 		 *
   5873 		 * This is only valid on the last descriptor of the packet.
   5874 		 */
   5875 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   5876 			txq->txq_descs[lasttx].wtx_cmdlen |=
   5877 			    htole32(WTX_CMD_VLE);
   5878 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   5879 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   5880 		}
   5881 
   5882 		txs->txs_lastdesc = lasttx;
   5883 
   5884 		DPRINTF(WM_DEBUG_TX,
   5885 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   5886 		    device_xname(sc->sc_dev),
    5887 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   5888 
   5889 		/* Sync the descriptors we're using. */
   5890 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   5891 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5892 
   5893 		/* Give the packet to the chip. */
   5894 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   5895 
   5896 		DPRINTF(WM_DEBUG_TX,
   5897 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   5898 
   5899 		DPRINTF(WM_DEBUG_TX,
   5900 		    ("%s: TX: finished transmitting packet, job %d\n",
    5901 		    device_xname(sc->sc_dev), txq->txq_snext));
   5902 
   5903 		/* Advance the tx pointer. */
   5904 		txq->txq_free -= txs->txs_ndesc;
   5905 		txq->txq_next = nexttx;
   5906 
   5907 		txq->txq_sfree--;
   5908 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   5909 
   5910 		/* Pass the packet to any BPF listeners. */
   5911 		bpf_mtap(ifp, m0);
   5912 	}
   5913 
   5914 	if (m0 != NULL) {
   5915 		ifp->if_flags |= IFF_OACTIVE;
   5916 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5917 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   5918 		m_freem(m0);
   5919 	}
   5920 
   5921 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   5922 		/* No more slots; notify upper layer. */
   5923 		ifp->if_flags |= IFF_OACTIVE;
   5924 	}
   5925 
   5926 	if (txq->txq_free != ofree) {
   5927 		/* Set a watchdog timer in case the chip flakes out. */
   5928 		ifp->if_timer = 5;
   5929 	}
   5930 }
   5931 
   5932 /*
   5933  * wm_nq_tx_offload:
   5934  *
   5935  *	Set up TCP/IP checksumming parameters for the
   5936  *	specified packet, for NEWQUEUE devices
   5937  */
   5938 static int
   5939 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   5940     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   5941 {
   5942 	struct wm_txqueue *txq = sc->sc_txq;
   5943 	struct mbuf *m0 = txs->txs_mbuf;
   5944 	struct m_tag *mtag;
   5945 	uint32_t vl_len, mssidx, cmdc;
   5946 	struct ether_header *eh;
   5947 	int offset, iphl;
   5948 
   5949 	/*
   5950 	 * XXX It would be nice if the mbuf pkthdr had offset
   5951 	 * fields for the protocol headers.
   5952 	 */
   5953 	*cmdlenp = 0;
   5954 	*fieldsp = 0;
   5955 
   5956 	eh = mtod(m0, struct ether_header *);
   5957 	switch (htons(eh->ether_type)) {
   5958 	case ETHERTYPE_IP:
   5959 	case ETHERTYPE_IPV6:
   5960 		offset = ETHER_HDR_LEN;
   5961 		break;
   5962 
   5963 	case ETHERTYPE_VLAN:
   5964 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5965 		break;
   5966 
   5967 	default:
   5968 		/* Don't support this protocol or encapsulation. */
   5969 		*do_csum = false;
   5970 		return 0;
   5971 	}
   5972 	*do_csum = true;
   5973 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   5974 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   5975 
   5976 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   5977 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   5978 
   5979 	if ((m0->m_pkthdr.csum_flags &
   5980 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   5981 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5982 	} else {
   5983 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5984 	}
   5985 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   5986 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
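         	/*
         	 * E.g. an untagged IPv4 frame packs MACLEN = 14 and
         	 * IPLEN = 20 into vl_len at this point.
         	 */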
   5987 
   5988 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   5989 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   5990 		     << NQTXC_VLLEN_VLAN_SHIFT);
   5991 		*cmdlenp |= NQTX_CMD_VLE;
   5992 	}
   5993 
   5994 	mssidx = 0;
   5995 
   5996 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5997 		int hlen = offset + iphl;
   5998 		int tcp_hlen;
   5999 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6000 
   6001 		if (__predict_false(m0->m_len <
   6002 				    (hlen + sizeof(struct tcphdr)))) {
   6003 			/*
   6004 			 * TCP/IP headers are not in the first mbuf; we need
   6005 			 * to do this the slow and painful way.  Let's just
   6006 			 * hope this doesn't happen very often.
   6007 			 */
   6008 			struct tcphdr th;
   6009 
   6010 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6011 
   6012 			m_copydata(m0, hlen, sizeof(th), &th);
   6013 			if (v4) {
   6014 				struct ip ip;
   6015 
   6016 				m_copydata(m0, offset, sizeof(ip), &ip);
   6017 				ip.ip_len = 0;
   6018 				m_copyback(m0,
   6019 				    offset + offsetof(struct ip, ip_len),
   6020 				    sizeof(ip.ip_len), &ip.ip_len);
   6021 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6022 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6023 			} else {
   6024 				struct ip6_hdr ip6;
   6025 
   6026 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6027 				ip6.ip6_plen = 0;
   6028 				m_copyback(m0,
   6029 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6030 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6031 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6032 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6033 			}
   6034 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6035 			    sizeof(th.th_sum), &th.th_sum);
   6036 
   6037 			tcp_hlen = th.th_off << 2;
   6038 		} else {
   6039 			/*
   6040 			 * TCP/IP headers are in the first mbuf; we can do
   6041 			 * this the easy way.
   6042 			 */
   6043 			struct tcphdr *th;
   6044 
   6045 			if (v4) {
   6046 				struct ip *ip =
   6047 				    (void *)(mtod(m0, char *) + offset);
   6048 				th = (void *)(mtod(m0, char *) + hlen);
   6049 
   6050 				ip->ip_len = 0;
   6051 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6052 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6053 			} else {
   6054 				struct ip6_hdr *ip6 =
   6055 				    (void *)(mtod(m0, char *) + offset);
   6056 				th = (void *)(mtod(m0, char *) + hlen);
   6057 
   6058 				ip6->ip6_plen = 0;
   6059 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6060 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6061 			}
   6062 			tcp_hlen = th->th_off << 2;
   6063 		}
   6064 		hlen += tcp_hlen;
   6065 		*cmdlenp |= NQTX_CMD_TSE;
   6066 
   6067 		if (v4) {
   6068 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6069 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6070 		} else {
   6071 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6072 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6073 		}
   6074 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6075 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6076 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6077 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6078 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6079 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
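         		/*
         		 * Summary of the values packed above, for option-less
         		 * IPv4/TCP TSO: 54 header bytes give
         		 * PAYLEN = m_pkthdr.len - 54, with MSS = segsz
         		 * (typically 1460) and L4LEN = 20 in mssidx.
         		 */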
   6080 	} else {
   6081 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6082 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6083 	}
   6084 
   6085 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6086 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6087 		cmdc |= NQTXC_CMD_IP4;
   6088 	}
   6089 
   6090 	if (m0->m_pkthdr.csum_flags &
   6091 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6092 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6093 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6094 			cmdc |= NQTXC_CMD_TCP;
   6095 		} else {
   6096 			cmdc |= NQTXC_CMD_UDP;
   6097 		}
   6098 		cmdc |= NQTXC_CMD_IP4;
   6099 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6100 	}
   6101 	if (m0->m_pkthdr.csum_flags &
   6102 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6103 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6104 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6105 			cmdc |= NQTXC_CMD_TCP;
   6106 		} else {
   6107 			cmdc |= NQTXC_CMD_UDP;
   6108 		}
   6109 		cmdc |= NQTXC_CMD_IP6;
   6110 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6111 	}
   6112 
   6113 	/* Fill in the context descriptor. */
   6114 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6115 	    htole32(vl_len);
   6116 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6117 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6118 	    htole32(cmdc);
   6119 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6120 	    htole32(mssidx);
   6121 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6122 	DPRINTF(WM_DEBUG_TX,
   6123 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
    6124 	    txq->txq_next, 0, vl_len));
   6125 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6126 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6127 	txs->txs_ndesc++;
   6128 	return 0;
   6129 }
   6130 
   6131 /*
   6132  * wm_nq_start:		[ifnet interface function]
   6133  *
   6134  *	Start packet transmission on the interface for NEWQUEUE devices
   6135  */
   6136 static void
   6137 wm_nq_start(struct ifnet *ifp)
   6138 {
   6139 	struct wm_softc *sc = ifp->if_softc;
   6140 	struct wm_txqueue *txq = sc->sc_txq;
   6141 
   6142 	WM_TX_LOCK(txq);
   6143 	if (!sc->sc_stopping)
   6144 		wm_nq_start_locked(ifp);
   6145 	WM_TX_UNLOCK(txq);
   6146 }
   6147 
   6148 static void
   6149 wm_nq_start_locked(struct ifnet *ifp)
   6150 {
   6151 	struct wm_softc *sc = ifp->if_softc;
   6152 	struct wm_txqueue *txq = sc->sc_txq;
   6153 	struct mbuf *m0;
   6154 	struct m_tag *mtag;
   6155 	struct wm_txsoft *txs;
   6156 	bus_dmamap_t dmamap;
   6157 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6158 	bool do_csum, sent;
   6159 
   6160 	KASSERT(WM_TX_LOCKED(txq));
   6161 
   6162 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   6163 		return;
   6164 
   6165 	sent = false;
   6166 
   6167 	/*
   6168 	 * Loop through the send queue, setting up transmit descriptors
   6169 	 * until we drain the queue, or use up all available transmit
   6170 	 * descriptors.
   6171 	 */
   6172 	for (;;) {
   6173 		m0 = NULL;
   6174 
   6175 		/* Get a work queue entry. */
   6176 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6177 			wm_txeof(sc);
   6178 			if (txq->txq_sfree == 0) {
   6179 				DPRINTF(WM_DEBUG_TX,
   6180 				    ("%s: TX: no free job descriptors\n",
   6181 					device_xname(sc->sc_dev)));
   6182 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6183 				break;
   6184 			}
   6185 		}
   6186 
   6187 		/* Grab a packet off the queue. */
   6188 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6189 		if (m0 == NULL)
   6190 			break;
   6191 
   6192 		DPRINTF(WM_DEBUG_TX,
   6193 		    ("%s: TX: have packet to transmit: %p\n",
   6194 		    device_xname(sc->sc_dev), m0));
   6195 
   6196 		txs = &txq->txq_soft[txq->txq_snext];
   6197 		dmamap = txs->txs_dmamap;
   6198 
   6199 		/*
   6200 		 * Load the DMA map.  If this fails, the packet either
   6201 		 * didn't fit in the allotted number of segments, or we
   6202 		 * were short on resources.  For the too-many-segments
   6203 		 * case, we simply report an error and drop the packet,
   6204 		 * since we can't sanely copy a jumbo packet to a single
   6205 		 * buffer.
   6206 		 */
   6207 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6208 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   6209 		if (error) {
   6210 			if (error == EFBIG) {
   6211 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6212 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6213 				    "DMA segments, dropping...\n",
   6214 				    device_xname(sc->sc_dev));
   6215 				wm_dump_mbuf_chain(sc, m0);
   6216 				m_freem(m0);
   6217 				continue;
   6218 			}
   6219 			/* Short on resources, just stop for now. */
   6220 			DPRINTF(WM_DEBUG_TX,
   6221 			    ("%s: TX: dmamap load failed: %d\n",
   6222 			    device_xname(sc->sc_dev), error));
   6223 			break;
   6224 		}
   6225 
   6226 		segs_needed = dmamap->dm_nsegs;
   6227 
   6228 		/*
   6229 		 * Ensure we have enough descriptors free to describe
   6230 		 * the packet.  Note, we always reserve one descriptor
   6231 		 * at the end of the ring due to the semantics of the
   6232 		 * TDT register, plus one more in the event we need
   6233 		 * to load offload context.
   6234 		 */
   6235 		if (segs_needed > txq->txq_free - 2) {
   6236 			/*
   6237 			 * Not enough free descriptors to transmit this
   6238 			 * packet.  We haven't committed anything yet,
   6239 			 * so just unload the DMA map, put the packet
    6240 			 * back on the queue, and punt.  Notify the upper
   6241 			 * layer that there are no more slots left.
   6242 			 */
   6243 			DPRINTF(WM_DEBUG_TX,
   6244 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6245 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
    6246 			    segs_needed, txq->txq_free - 1));
   6247 			ifp->if_flags |= IFF_OACTIVE;
   6248 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6249 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6250 			break;
   6251 		}
   6252 
   6253 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6254 
   6255 		DPRINTF(WM_DEBUG_TX,
   6256 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6257 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6258 
   6259 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6260 
   6261 		/*
   6262 		 * Store a pointer to the packet so that we can free it
   6263 		 * later.
   6264 		 *
   6265 		 * Initially, we consider the number of descriptors the
    6266 		 * packet uses to be the number of DMA segments.  This may be
   6267 		 * incremented by 1 if we do checksum offload (a descriptor
   6268 		 * is used to set the checksum context).
   6269 		 */
   6270 		txs->txs_mbuf = m0;
   6271 		txs->txs_firstdesc = txq->txq_next;
   6272 		txs->txs_ndesc = segs_needed;
   6273 
   6274 		/* Set up offload parameters for this packet. */
   6275 		uint32_t cmdlen, fields, dcmdlen;
   6276 		if (m0->m_pkthdr.csum_flags &
   6277 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   6278 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   6279 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   6280 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   6281 			    &do_csum) != 0) {
   6282 				/* Error message already displayed. */
   6283 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6284 				continue;
   6285 			}
   6286 		} else {
   6287 			do_csum = false;
   6288 			cmdlen = 0;
   6289 			fields = 0;
   6290 		}
   6291 
   6292 		/* Sync the DMA map. */
   6293 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6294 		    BUS_DMASYNC_PREWRITE);
   6295 
   6296 		/* Initialize the first transmit descriptor. */
   6297 		nexttx = txq->txq_next;
   6298 		if (!do_csum) {
   6299 			/* setup a legacy descriptor */
   6300 			wm_set_dma_addr(
   6301 			    &txq->txq_descs[nexttx].wtx_addr,
   6302 			    dmamap->dm_segs[0].ds_addr);
   6303 			txq->txq_descs[nexttx].wtx_cmdlen =
   6304 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6305 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6306 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6307 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6308 			    NULL) {
   6309 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6310 				    htole32(WTX_CMD_VLE);
   6311 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6312 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6313 			} else {
    6314 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6315 			}
   6316 			dcmdlen = 0;
   6317 		} else {
   6318 			/* setup an advanced data descriptor */
   6319 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6320 			    htole64(dmamap->dm_segs[0].ds_addr);
   6321 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6322 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6323 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6324 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6325 			    htole32(fields);
   6326 			DPRINTF(WM_DEBUG_TX,
   6327 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6328 			    device_xname(sc->sc_dev), nexttx,
   6329 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6330 			DPRINTF(WM_DEBUG_TX,
   6331 			    ("\t 0x%08x%08x\n", fields,
   6332 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6333 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6334 		}
   6335 
   6336 		lasttx = nexttx;
   6337 		nexttx = WM_NEXTTX(txq, nexttx);
    6338 		/*
    6339 		 * Fill in the next descriptors.  Legacy or advanced format
    6340 		 * is the same here.
    6341 		 */
   6342 		for (seg = 1; seg < dmamap->dm_nsegs;
   6343 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6344 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6345 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6346 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6347 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6348 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6349 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6350 			lasttx = nexttx;
   6351 
   6352 			DPRINTF(WM_DEBUG_TX,
   6353 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6354 			     "len %#04zx\n",
   6355 			    device_xname(sc->sc_dev), nexttx,
   6356 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6357 			    dmamap->dm_segs[seg].ds_len));
   6358 		}
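         		/*
         		 * A minimal sketch of the index arithmetic assumed by
         		 * the loop above: WM_NEXTTX() advances modulo the ring
         		 * size, e.g. roughly (nexttx + 1) % ring_size, so a
         		 * packet's descriptors may wrap around the end of the
         		 * ring while lasttx keeps tracking the final one.
         		 */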
   6359 
   6360 		KASSERT(lasttx != -1);
   6361 
   6362 		/*
   6363 		 * Set up the command byte on the last descriptor of
   6364 		 * the packet.  If we're in the interrupt delay window,
   6365 		 * delay the interrupt.
   6366 		 */
   6367 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6368 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6369 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6370 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6371 
   6372 		txs->txs_lastdesc = lasttx;
   6373 
   6374 		DPRINTF(WM_DEBUG_TX,
   6375 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6376 		    device_xname(sc->sc_dev),
    6377 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6378 
   6379 		/* Sync the descriptors we're using. */
   6380 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6381 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   6382 
   6383 		/* Give the packet to the chip. */
   6384 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6385 		sent = true;
   6386 
   6387 		DPRINTF(WM_DEBUG_TX,
   6388 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6389 
   6390 		DPRINTF(WM_DEBUG_TX,
   6391 		    ("%s: TX: finished transmitting packet, job %d\n",
    6392 		    device_xname(sc->sc_dev), txq->txq_snext));
   6393 
   6394 		/* Advance the tx pointer. */
   6395 		txq->txq_free -= txs->txs_ndesc;
   6396 		txq->txq_next = nexttx;
   6397 
   6398 		txq->txq_sfree--;
   6399 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6400 
   6401 		/* Pass the packet to any BPF listeners. */
   6402 		bpf_mtap(ifp, m0);
   6403 	}
   6404 
   6405 	if (m0 != NULL) {
   6406 		ifp->if_flags |= IFF_OACTIVE;
   6407 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6408 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   6409 		m_freem(m0);
   6410 	}
   6411 
   6412 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6413 		/* No more slots; notify upper layer. */
   6414 		ifp->if_flags |= IFF_OACTIVE;
   6415 	}
   6416 
   6417 	if (sent) {
   6418 		/* Set a watchdog timer in case the chip flakes out. */
   6419 		ifp->if_timer = 5;
   6420 	}
   6421 }
   6422 
   6423 /* Interrupt */
   6424 
   6425 /*
   6426  * wm_txeof:
   6427  *
   6428  *	Helper; handle transmit interrupts.
   6429  */
   6430 static int
   6431 wm_txeof(struct wm_softc *sc)
   6432 {
   6433 	struct wm_txqueue *txq = sc->sc_txq;
   6434 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6435 	struct wm_txsoft *txs;
   6436 	bool processed = false;
   6437 	int count = 0;
   6438 	int i;
   6439 	uint8_t status;
   6440 
   6441 	if (sc->sc_stopping)
   6442 		return 0;
   6443 
   6444 	ifp->if_flags &= ~IFF_OACTIVE;
   6445 
   6446 	/*
   6447 	 * Go through the Tx list and free mbufs for those
   6448 	 * frames which have been transmitted.
   6449 	 */
   6450 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   6451 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   6452 		txs = &txq->txq_soft[i];
   6453 
   6454 		DPRINTF(WM_DEBUG_TX,
   6455 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   6456 
   6457 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   6458 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6459 
   6460 		status =
   6461 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   6462 		if ((status & WTX_ST_DD) == 0) {
   6463 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   6464 			    BUS_DMASYNC_PREREAD);
   6465 			break;
   6466 		}
   6467 
   6468 		processed = true;
   6469 		count++;
   6470 		DPRINTF(WM_DEBUG_TX,
   6471 		    ("%s: TX: job %d done: descs %d..%d\n",
   6472 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   6473 		    txs->txs_lastdesc));
   6474 
   6475 		/*
   6476 		 * XXX We should probably be using the statistics
   6477 		 * XXX registers, but I don't know if they exist
   6478 		 * XXX on chips before the i82544.
   6479 		 */
   6480 
   6481 #ifdef WM_EVENT_COUNTERS
   6482 		if (status & WTX_ST_TU)
   6483 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   6484 #endif /* WM_EVENT_COUNTERS */
   6485 
   6486 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   6487 			ifp->if_oerrors++;
   6488 			if (status & WTX_ST_LC)
   6489 				log(LOG_WARNING, "%s: late collision\n",
   6490 				    device_xname(sc->sc_dev));
   6491 			else if (status & WTX_ST_EC) {
   6492 				ifp->if_collisions += 16;
   6493 				log(LOG_WARNING, "%s: excessive collisions\n",
   6494 				    device_xname(sc->sc_dev));
   6495 			}
   6496 		} else
   6497 			ifp->if_opackets++;
   6498 
   6499 		txq->txq_free += txs->txs_ndesc;
   6500 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   6501 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   6502 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6503 		m_freem(txs->txs_mbuf);
   6504 		txs->txs_mbuf = NULL;
   6505 	}
   6506 
   6507 	/* Update the dirty transmit buffer pointer. */
   6508 	txq->txq_sdirty = i;
   6509 	DPRINTF(WM_DEBUG_TX,
   6510 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   6511 
   6512 	if (count != 0)
   6513 		rnd_add_uint32(&sc->rnd_source, count);
   6514 
   6515 	/*
   6516 	 * If there are no more pending transmissions, cancel the watchdog
   6517 	 * timer.
   6518 	 */
   6519 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   6520 		ifp->if_timer = 0;
   6521 
   6522 	return processed;
   6523 }
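         /*
          * Sketch of the transmit-job bookkeeping used above (illustrative,
          * not driver code): txq_snext is the slot the start routine fills
          * next, txq_sdirty is the oldest job still owned by the chip, and
          * txq_sfree counts the unused slots between them, so the reclaim
          * loop walks
          *
          *	for (i = txq_sdirty; txq_sfree != WM_TXQUEUELEN(txq); ...)
          *
          * until it reaches a descriptor whose WTX_ST_DD (descriptor done)
          * bit the hardware has not yet set.
          */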
   6524 
   6525 /*
   6526  * wm_rxeof:
   6527  *
   6528  *	Helper; handle receive interrupts.
   6529  */
   6530 static void
   6531 wm_rxeof(struct wm_rxqueue *rxq)
   6532 {
   6533 	struct wm_softc *sc = rxq->rxq_sc;
   6534 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6535 	struct wm_rxsoft *rxs;
   6536 	struct mbuf *m;
   6537 	int i, len;
   6538 	int count = 0;
   6539 	uint8_t status, errors;
   6540 	uint16_t vlantag;
   6541 
   6542 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   6543 		rxs = &rxq->rxq_soft[i];
   6544 
   6545 		DPRINTF(WM_DEBUG_RX,
   6546 		    ("%s: RX: checking descriptor %d\n",
   6547 		    device_xname(sc->sc_dev), i));
   6548 
   6549 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6550 
   6551 		status = rxq->rxq_descs[i].wrx_status;
   6552 		errors = rxq->rxq_descs[i].wrx_errors;
   6553 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   6554 		vlantag = rxq->rxq_descs[i].wrx_special;
   6555 
   6556 		if ((status & WRX_ST_DD) == 0) {
   6557 			/* We have processed all of the receive descriptors. */
   6558 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   6559 			break;
   6560 		}
   6561 
   6562 		count++;
   6563 		if (__predict_false(rxq->rxq_discard)) {
   6564 			DPRINTF(WM_DEBUG_RX,
   6565 			    ("%s: RX: discarding contents of descriptor %d\n",
   6566 			    device_xname(sc->sc_dev), i));
   6567 			wm_init_rxdesc(rxq, i);
   6568 			if (status & WRX_ST_EOP) {
   6569 				/* Reset our state. */
   6570 				DPRINTF(WM_DEBUG_RX,
   6571 				    ("%s: RX: resetting rxdiscard -> 0\n",
   6572 				    device_xname(sc->sc_dev)));
   6573 				rxq->rxq_discard = 0;
   6574 			}
   6575 			continue;
   6576 		}
   6577 
   6578 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6579 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   6580 
   6581 		m = rxs->rxs_mbuf;
   6582 
   6583 		/*
   6584 		 * Add a new receive buffer to the ring, unless of
   6585 		 * course the length is zero. Treat the latter as a
   6586 		 * failed mapping.
   6587 		 */
   6588 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   6589 			/*
   6590 			 * Failed, throw away what we've done so
   6591 			 * far, and discard the rest of the packet.
   6592 			 */
   6593 			ifp->if_ierrors++;
   6594 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6595 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   6596 			wm_init_rxdesc(rxq, i);
   6597 			if ((status & WRX_ST_EOP) == 0)
   6598 				rxq->rxq_discard = 1;
   6599 			if (rxq->rxq_head != NULL)
   6600 				m_freem(rxq->rxq_head);
   6601 			WM_RXCHAIN_RESET(rxq);
   6602 			DPRINTF(WM_DEBUG_RX,
   6603 			    ("%s: RX: Rx buffer allocation failed, "
   6604 			    "dropping packet%s\n", device_xname(sc->sc_dev),
    6605 			    rxq->rxq_discard ? " (discard)" : ""));
   6606 			continue;
   6607 		}
   6608 
   6609 		m->m_len = len;
   6610 		rxq->rxq_len += len;
   6611 		DPRINTF(WM_DEBUG_RX,
   6612 		    ("%s: RX: buffer at %p len %d\n",
   6613 		    device_xname(sc->sc_dev), m->m_data, len));
   6614 
   6615 		/* If this is not the end of the packet, keep looking. */
   6616 		if ((status & WRX_ST_EOP) == 0) {
   6617 			WM_RXCHAIN_LINK(rxq, m);
   6618 			DPRINTF(WM_DEBUG_RX,
   6619 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
    6620 			    device_xname(sc->sc_dev), rxq->rxq_len));
   6621 			continue;
   6622 		}
   6623 
    6624 		/*
    6625 		 * Okay, we have the entire packet now.  The chip is
    6626 		 * configured to include the FCS except on I350, I354
    6627 		 * and I21[01] (not all chips can be configured to
    6628 		 * strip it), so we need to trim it here.  We may also
    6629 		 * need to adjust the length of the previous mbuf in
    6630 		 * the chain if the current mbuf is too short.
    6631 		 * Due to an erratum, RCTL_SECRC is always set on I350,
    6632 		 * so the hardware strips the FCS and we must not.
    6633 		 */
   6634 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   6635 		    && (sc->sc_type != WM_T_I210)
   6636 		    && (sc->sc_type != WM_T_I211)) {
   6637 			if (m->m_len < ETHER_CRC_LEN) {
   6638 				rxq->rxq_tail->m_len
   6639 				    -= (ETHER_CRC_LEN - m->m_len);
   6640 				m->m_len = 0;
   6641 			} else
   6642 				m->m_len -= ETHER_CRC_LEN;
   6643 			len = rxq->rxq_len - ETHER_CRC_LEN;
   6644 		} else
   6645 			len = rxq->rxq_len;
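         		/*
         		 * Worked example (illustrative): a minimum-length
         		 * 64-byte frame arrives in one buffer, so len becomes
         		 * 64 - 4 = 60 once ETHER_CRC_LEN is trimmed.  If the
         		 * FCS straddles buffers -- say the last mbuf holds
         		 * only 2 bytes -- that mbuf is emptied and the
         		 * remaining 2 bytes come off the previous mbuf in
         		 * the chain.
         		 */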
   6646 
   6647 		WM_RXCHAIN_LINK(rxq, m);
   6648 
   6649 		*rxq->rxq_tailp = NULL;
   6650 		m = rxq->rxq_head;
   6651 
   6652 		WM_RXCHAIN_RESET(rxq);
   6653 
   6654 		DPRINTF(WM_DEBUG_RX,
   6655 		    ("%s: RX: have entire packet, len -> %d\n",
   6656 		    device_xname(sc->sc_dev), len));
   6657 
   6658 		/* If an error occurred, update stats and drop the packet. */
   6659 		if (errors &
   6660 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   6661 			if (errors & WRX_ER_SE)
   6662 				log(LOG_WARNING, "%s: symbol error\n",
   6663 				    device_xname(sc->sc_dev));
   6664 			else if (errors & WRX_ER_SEQ)
   6665 				log(LOG_WARNING, "%s: receive sequence error\n",
   6666 				    device_xname(sc->sc_dev));
   6667 			else if (errors & WRX_ER_CE)
   6668 				log(LOG_WARNING, "%s: CRC error\n",
   6669 				    device_xname(sc->sc_dev));
   6670 			m_freem(m);
   6671 			continue;
   6672 		}
   6673 
   6674 		/* No errors.  Receive the packet. */
   6675 		m->m_pkthdr.rcvif = ifp;
   6676 		m->m_pkthdr.len = len;
   6677 
   6678 		/*
   6679 		 * If VLANs are enabled, VLAN packets have been unwrapped
   6680 		 * for us.  Associate the tag with the packet.
   6681 		 */
    6682 		/* XXX should check for i350 and i354 */
   6683 		if ((status & WRX_ST_VP) != 0) {
   6684 			VLAN_INPUT_TAG(ifp, m,
   6685 			    le16toh(vlantag),
   6686 			    continue);
   6687 		}
   6688 
   6689 		/* Set up checksum info for this packet. */
   6690 		if ((status & WRX_ST_IXSM) == 0) {
   6691 			if (status & WRX_ST_IPCS) {
   6692 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   6693 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   6694 				if (errors & WRX_ER_IPE)
   6695 					m->m_pkthdr.csum_flags |=
   6696 					    M_CSUM_IPv4_BAD;
   6697 			}
   6698 			if (status & WRX_ST_TCPCS) {
   6699 				/*
   6700 				 * Note: we don't know if this was TCP or UDP,
   6701 				 * so we just set both bits, and expect the
   6702 				 * upper layers to deal.
   6703 				 */
   6704 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   6705 				m->m_pkthdr.csum_flags |=
   6706 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6707 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   6708 				if (errors & WRX_ER_TCPE)
   6709 					m->m_pkthdr.csum_flags |=
   6710 					    M_CSUM_TCP_UDP_BAD;
   6711 			}
   6712 		}
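         		/*
         		 * Example of the flags set above (illustrative): a
         		 * good TCPv4 segment arrives with WRX_ST_TCPCS set
         		 * and WRX_ER_TCPE clear, so the stack sees
         		 * M_CSUM_TCPv4 (among others) without
         		 * M_CSUM_TCP_UDP_BAD and can skip software checksum
         		 * verification; each upper layer only consults the
         		 * bits that apply to its own protocol.
         		 */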
   6713 
   6714 		ifp->if_ipackets++;
   6715 
   6716 		WM_RX_UNLOCK(rxq);
   6717 
   6718 		/* Pass this up to any BPF listeners. */
   6719 		bpf_mtap(ifp, m);
   6720 
   6721 		/* Pass it on. */
   6722 		(*ifp->if_input)(ifp, m);
   6723 
   6724 		WM_RX_LOCK(rxq);
   6725 
   6726 		if (sc->sc_stopping)
   6727 			break;
   6728 	}
   6729 
   6730 	/* Update the receive pointer. */
   6731 	rxq->rxq_ptr = i;
   6732 	if (count != 0)
   6733 		rnd_add_uint32(&sc->rnd_source, count);
   6734 
   6735 	DPRINTF(WM_DEBUG_RX,
   6736 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   6737 }
   6738 
   6739 /*
   6740  * wm_linkintr_gmii:
   6741  *
   6742  *	Helper; handle link interrupts for GMII.
   6743  */
   6744 static void
   6745 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   6746 {
   6747 
   6748 	KASSERT(WM_CORE_LOCKED(sc));
   6749 
   6750 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   6751 		__func__));
   6752 
   6753 	if (icr & ICR_LSC) {
   6754 		DPRINTF(WM_DEBUG_LINK,
   6755 		    ("%s: LINK: LSC -> mii_pollstat\n",
   6756 			device_xname(sc->sc_dev)));
   6757 		mii_pollstat(&sc->sc_mii);
   6758 		if (sc->sc_type == WM_T_82543) {
   6759 			int miistatus, active;
   6760 
   6761 			/*
   6762 			 * With 82543, we need to force speed and
   6763 			 * duplex on the MAC equal to what the PHY
   6764 			 * speed and duplex configuration is.
   6765 			 */
   6766 			miistatus = sc->sc_mii.mii_media_status;
   6767 
   6768 			if (miistatus & IFM_ACTIVE) {
   6769 				active = sc->sc_mii.mii_media_active;
   6770 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   6771 				switch (IFM_SUBTYPE(active)) {
   6772 				case IFM_10_T:
   6773 					sc->sc_ctrl |= CTRL_SPEED_10;
   6774 					break;
   6775 				case IFM_100_TX:
   6776 					sc->sc_ctrl |= CTRL_SPEED_100;
   6777 					break;
   6778 				case IFM_1000_T:
   6779 					sc->sc_ctrl |= CTRL_SPEED_1000;
   6780 					break;
   6781 				default:
   6782 					/*
    6783 					 * Fiber?
    6784 					 * Should not enter here.
   6785 					 */
   6786 					printf("unknown media (%x)\n",
   6787 					    active);
   6788 					break;
   6789 				}
   6790 				if (active & IFM_FDX)
   6791 					sc->sc_ctrl |= CTRL_FD;
   6792 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6793 			}
   6794 		} else if ((sc->sc_type == WM_T_ICH8)
   6795 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   6796 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   6797 		} else if (sc->sc_type == WM_T_PCH) {
   6798 			wm_k1_gig_workaround_hv(sc,
   6799 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   6800 		}
   6801 
   6802 		if ((sc->sc_phytype == WMPHY_82578)
   6803 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   6804 			== IFM_1000_T)) {
   6805 
   6806 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   6807 				delay(200*1000); /* XXX too big */
   6808 
   6809 				/* Link stall fix for link up */
   6810 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   6811 				    HV_MUX_DATA_CTRL,
   6812 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   6813 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   6814 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   6815 				    HV_MUX_DATA_CTRL,
   6816 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   6817 			}
   6818 		}
   6819 	} else if (icr & ICR_RXSEQ) {
   6820 		DPRINTF(WM_DEBUG_LINK,
   6821 		    ("%s: LINK Receive sequence error\n",
   6822 			device_xname(sc->sc_dev)));
   6823 	}
   6824 }
   6825 
   6826 /*
   6827  * wm_linkintr_tbi:
   6828  *
   6829  *	Helper; handle link interrupts for TBI mode.
   6830  */
   6831 static void
   6832 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   6833 {
   6834 	uint32_t status;
   6835 
   6836 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   6837 		__func__));
   6838 
   6839 	status = CSR_READ(sc, WMREG_STATUS);
   6840 	if (icr & ICR_LSC) {
   6841 		if (status & STATUS_LU) {
   6842 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   6843 			    device_xname(sc->sc_dev),
   6844 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   6845 			/*
   6846 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   6847 			 * so we should update sc->sc_ctrl
   6848 			 */
   6849 
   6850 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   6851 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   6852 			sc->sc_fcrtl &= ~FCRTL_XONE;
   6853 			if (status & STATUS_FD)
   6854 				sc->sc_tctl |=
   6855 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6856 			else
   6857 				sc->sc_tctl |=
   6858 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   6859 			if (sc->sc_ctrl & CTRL_TFCE)
   6860 				sc->sc_fcrtl |= FCRTL_XONE;
   6861 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6862 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   6863 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   6864 				      sc->sc_fcrtl);
   6865 			sc->sc_tbi_linkup = 1;
   6866 		} else {
   6867 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   6868 			    device_xname(sc->sc_dev)));
   6869 			sc->sc_tbi_linkup = 0;
   6870 		}
   6871 		/* Update LED */
   6872 		wm_tbi_serdes_set_linkled(sc);
   6873 	} else if (icr & ICR_RXSEQ) {
   6874 		DPRINTF(WM_DEBUG_LINK,
   6875 		    ("%s: LINK: Receive sequence error\n",
   6876 		    device_xname(sc->sc_dev)));
   6877 	}
   6878 }
   6879 
   6880 /*
   6881  * wm_linkintr_serdes:
   6882  *
    6883  *	Helper; handle link interrupts for SERDES mode.
   6884  */
   6885 static void
   6886 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   6887 {
   6888 	struct mii_data *mii = &sc->sc_mii;
   6889 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6890 	uint32_t pcs_adv, pcs_lpab, reg;
   6891 
   6892 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   6893 		__func__));
   6894 
   6895 	if (icr & ICR_LSC) {
   6896 		/* Check PCS */
   6897 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   6898 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   6899 			mii->mii_media_status |= IFM_ACTIVE;
   6900 			sc->sc_tbi_linkup = 1;
   6901 		} else {
   6902 			mii->mii_media_status |= IFM_NONE;
   6903 			sc->sc_tbi_linkup = 0;
   6904 			wm_tbi_serdes_set_linkled(sc);
   6905 			return;
   6906 		}
   6907 		mii->mii_media_active |= IFM_1000_SX;
   6908 		if ((reg & PCS_LSTS_FDX) != 0)
   6909 			mii->mii_media_active |= IFM_FDX;
   6910 		else
   6911 			mii->mii_media_active |= IFM_HDX;
   6912 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   6913 			/* Check flow */
   6914 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   6915 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   6916 				DPRINTF(WM_DEBUG_LINK,
   6917 				    ("XXX LINKOK but not ACOMP\n"));
   6918 				return;
   6919 			}
   6920 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   6921 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   6922 			DPRINTF(WM_DEBUG_LINK,
   6923 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   6924 			if ((pcs_adv & TXCW_SYM_PAUSE)
   6925 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   6926 				mii->mii_media_active |= IFM_FLOW
   6927 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   6928 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   6929 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   6930 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   6931 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   6932 				mii->mii_media_active |= IFM_FLOW
   6933 				    | IFM_ETH_TXPAUSE;
   6934 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   6935 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   6936 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   6937 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   6938 				mii->mii_media_active |= IFM_FLOW
   6939 				    | IFM_ETH_RXPAUSE;
   6940 		}
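         		/*
         		 * The pause resolution above follows the usual 802.3
         		 * Annex 28B priority table (sketch; SYM/ASYM are the
         		 * TXCW_*_PAUSE bits):
         		 *
         		 *	local		partner		result
         		 *	SYM=1		SYM=1		TX and RX pause
         		 *	SYM=0,ASYM=1	SYM=1,ASYM=1	TX pause only
         		 *	SYM=1,ASYM=1	SYM=0,ASYM=1	RX pause only
         		 */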
   6941 		/* Update LED */
   6942 		wm_tbi_serdes_set_linkled(sc);
   6943 	} else {
   6944 		DPRINTF(WM_DEBUG_LINK,
   6945 		    ("%s: LINK: Receive sequence error\n",
   6946 		    device_xname(sc->sc_dev)));
   6947 	}
   6948 }
   6949 
   6950 /*
   6951  * wm_linkintr:
   6952  *
   6953  *	Helper; handle link interrupts.
   6954  */
   6955 static void
   6956 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   6957 {
   6958 
   6959 	KASSERT(WM_CORE_LOCKED(sc));
   6960 
   6961 	if (sc->sc_flags & WM_F_HAS_MII)
   6962 		wm_linkintr_gmii(sc, icr);
   6963 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   6964 	    && (sc->sc_type >= WM_T_82575))
   6965 		wm_linkintr_serdes(sc, icr);
   6966 	else
   6967 		wm_linkintr_tbi(sc, icr);
   6968 }
   6969 
   6970 /*
   6971  * wm_intr_legacy:
   6972  *
   6973  *	Interrupt service routine for INTx and MSI.
   6974  */
   6975 static int
   6976 wm_intr_legacy(void *arg)
   6977 {
   6978 	struct wm_softc *sc = arg;
   6979 	struct wm_txqueue *txq = sc->sc_txq;
   6980 	struct wm_rxqueue *rxq = sc->sc_rxq;
   6981 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6982 	uint32_t icr, rndval = 0;
   6983 	int handled = 0;
   6984 
   6985 	DPRINTF(WM_DEBUG_TX,
   6986 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   6987 	while (1 /* CONSTCOND */) {
   6988 		icr = CSR_READ(sc, WMREG_ICR);
   6989 		if ((icr & sc->sc_icr) == 0)
   6990 			break;
   6991 		if (rndval == 0)
   6992 			rndval = icr;
   6993 
   6994 		WM_RX_LOCK(rxq);
   6995 
   6996 		if (sc->sc_stopping) {
   6997 			WM_RX_UNLOCK(rxq);
   6998 			break;
   6999 		}
   7000 
   7001 		handled = 1;
   7002 
   7003 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7004 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   7005 			DPRINTF(WM_DEBUG_RX,
   7006 			    ("%s: RX: got Rx intr 0x%08x\n",
   7007 			    device_xname(sc->sc_dev),
   7008 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   7009 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7010 		}
   7011 #endif
   7012 		wm_rxeof(rxq);
   7013 
   7014 		WM_RX_UNLOCK(rxq);
   7015 		WM_TX_LOCK(txq);
   7016 
   7017 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7018 		if (icr & ICR_TXDW) {
   7019 			DPRINTF(WM_DEBUG_TX,
   7020 			    ("%s: TX: got TXDW interrupt\n",
   7021 			    device_xname(sc->sc_dev)));
   7022 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7023 		}
   7024 #endif
   7025 		wm_txeof(sc);
   7026 
   7027 		WM_TX_UNLOCK(txq);
   7028 		WM_CORE_LOCK(sc);
   7029 
   7030 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
   7031 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7032 			wm_linkintr(sc, icr);
   7033 		}
   7034 
   7035 		WM_CORE_UNLOCK(sc);
   7036 
   7037 		if (icr & ICR_RXO) {
   7038 #if defined(WM_DEBUG)
   7039 			log(LOG_WARNING, "%s: Receive overrun\n",
   7040 			    device_xname(sc->sc_dev));
   7041 #endif /* defined(WM_DEBUG) */
   7042 		}
   7043 	}
   7044 
   7045 	rnd_add_uint32(&sc->rnd_source, rndval);
   7046 
   7047 	if (handled) {
   7048 		/* Try to get more packets going. */
   7049 		ifp->if_start(ifp);
   7050 	}
   7051 
   7052 	return handled;
   7053 }
   7054 
   7055 #ifdef WM_MSI_MSIX
   7056 /*
   7057  * wm_txintr_msix:
   7058  *
   7059  *	Interrupt service routine for TX complete interrupt for MSI-X.
   7060  */
   7061 static int
   7062 wm_txintr_msix(void *arg)
   7063 {
   7064 	struct wm_softc *sc = arg;
   7065 	struct wm_txqueue *txq = sc->sc_txq;
   7066 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7067 	int handled = 0;
   7068 
   7069 	DPRINTF(WM_DEBUG_TX,
   7070 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7071 
   7072 	if (sc->sc_type == WM_T_82574)
   7073 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(0)); /* 82574 only */
   7074 	else if (sc->sc_type == WM_T_82575)
   7075 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(0));
   7076 	else
   7077 		CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_TXINTR_IDX);
   7078 
   7079 	WM_TX_LOCK(txq);
   7080 
   7081 	if (sc->sc_stopping)
   7082 		goto out;
   7083 
   7084 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7085 	handled = wm_txeof(sc);
   7086 
   7087 out:
   7088 	WM_TX_UNLOCK(txq);
   7089 
   7090 	if (sc->sc_type == WM_T_82574)
   7091 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(0)); /* 82574 only */
   7092 	else if (sc->sc_type == WM_T_82575)
   7093 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(0));
   7094 	else
   7095 		CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_TXINTR_IDX);
   7096 
   7097 	if (handled) {
   7098 		/* Try to get more packets going. */
   7099 		ifp->if_start(ifp);
   7100 	}
   7101 
   7102 	return handled;
   7103 }
   7104 
   7105 /*
   7106  * wm_rxintr_msix:
   7107  *
   7108  *	Interrupt service routine for RX interrupt for MSI-X.
   7109  */
   7110 static int
   7111 wm_rxintr_msix(void *arg)
   7112 {
   7113 	struct wm_softc *sc = arg;
   7114 	struct wm_rxqueue *rxq = sc->sc_rxq;
   7115 
    7116 	DPRINTF(WM_DEBUG_RX,
   7117 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7118 
   7119 	if (sc->sc_type == WM_T_82574)
   7120 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(0)); /* 82574 only */
   7121 	else if (sc->sc_type == WM_T_82575)
   7122 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(0));
   7123 	else
   7124 		CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_RXINTR_IDX);
   7125 
   7126 	WM_RX_LOCK(rxq);
   7127 
   7128 	if (sc->sc_stopping)
   7129 		goto out;
   7130 
   7131 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7132 	wm_rxeof(rxq);
   7133 
   7134 out:
   7135 	WM_RX_UNLOCK(rxq);
   7136 
   7137 	if (sc->sc_type == WM_T_82574)
   7138 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(0));
   7139 	else if (sc->sc_type == WM_T_82575)
   7140 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(0));
   7141 	else
   7142 		CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_RXINTR_IDX);
   7143 
   7144 	return 1;
   7145 }
   7146 
   7147 /*
   7148  * wm_linkintr_msix:
   7149  *
   7150  *	Interrupt service routine for link status change for MSI-X.
   7151  */
   7152 static int
   7153 wm_linkintr_msix(void *arg)
   7154 {
   7155 	struct wm_softc *sc = arg;
   7156 	uint32_t reg;
   7157 
    7158 	DPRINTF(WM_DEBUG_LINK,
   7159 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7160 
   7161 	reg = CSR_READ(sc, WMREG_ICR);
   7162 	WM_CORE_LOCK(sc);
   7163 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7164 		goto out;
   7165 
   7166 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7167 	wm_linkintr(sc, ICR_LSC);
   7168 
   7169 out:
   7170 	WM_CORE_UNLOCK(sc);
   7171 
   7172 	if (sc->sc_type == WM_T_82574)
   7173 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
   7174 	else if (sc->sc_type == WM_T_82575)
   7175 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7176 	else
   7177 		CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_LINKINTR_IDX);
   7178 
   7179 	return 1;
   7180 }
   7181 #endif /* WM_MSI_MSIX */
   7182 
   7183 /*
   7184  * Media related.
   7185  * GMII, SGMII, TBI (and SERDES)
   7186  */
   7187 
   7188 /* Common */
   7189 
   7190 /*
   7191  * wm_tbi_serdes_set_linkled:
   7192  *
   7193  *	Update the link LED on TBI and SERDES devices.
   7194  */
   7195 static void
   7196 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7197 {
   7198 
   7199 	if (sc->sc_tbi_linkup)
   7200 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7201 	else
   7202 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7203 
   7204 	/* 82540 or newer devices are active low */
   7205 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7206 
   7207 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7208 }
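         /*
          * Example (illustrative): on an 82540 with link up, CTRL_SWDPIN(0)
          * is first set and then flipped back by the XOR above, so the pin
          * is driven low -- which is what lights the LED on these
          * active-low devices.
          */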
   7209 
   7210 /* GMII related */
   7211 
   7212 /*
   7213  * wm_gmii_reset:
   7214  *
   7215  *	Reset the PHY.
   7216  */
   7217 static void
   7218 wm_gmii_reset(struct wm_softc *sc)
   7219 {
   7220 	uint32_t reg;
   7221 	int rv;
   7222 
   7223 	/* get phy semaphore */
   7224 	switch (sc->sc_type) {
   7225 	case WM_T_82571:
   7226 	case WM_T_82572:
   7227 	case WM_T_82573:
   7228 	case WM_T_82574:
   7229 	case WM_T_82583:
   7230 		 /* XXX should get sw semaphore, too */
   7231 		rv = wm_get_swsm_semaphore(sc);
   7232 		break;
   7233 	case WM_T_82575:
   7234 	case WM_T_82576:
   7235 	case WM_T_82580:
   7236 	case WM_T_I350:
   7237 	case WM_T_I354:
   7238 	case WM_T_I210:
   7239 	case WM_T_I211:
   7240 	case WM_T_80003:
   7241 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7242 		break;
   7243 	case WM_T_ICH8:
   7244 	case WM_T_ICH9:
   7245 	case WM_T_ICH10:
   7246 	case WM_T_PCH:
   7247 	case WM_T_PCH2:
   7248 	case WM_T_PCH_LPT:
   7249 		rv = wm_get_swfwhw_semaphore(sc);
   7250 		break;
   7251 	default:
    7252 		/* Nothing to do */
   7253 		rv = 0;
   7254 		break;
   7255 	}
   7256 	if (rv != 0) {
   7257 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7258 		    __func__);
   7259 		return;
   7260 	}
   7261 
   7262 	switch (sc->sc_type) {
   7263 	case WM_T_82542_2_0:
   7264 	case WM_T_82542_2_1:
   7265 		/* null */
   7266 		break;
   7267 	case WM_T_82543:
   7268 		/*
   7269 		 * With 82543, we need to force speed and duplex on the MAC
   7270 		 * equal to what the PHY speed and duplex configuration is.
   7271 		 * In addition, we need to perform a hardware reset on the PHY
   7272 		 * to take it out of reset.
   7273 		 */
   7274 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7275 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7276 
   7277 		/* The PHY reset pin is active-low. */
   7278 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7279 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7280 		    CTRL_EXT_SWDPIN(4));
   7281 		reg |= CTRL_EXT_SWDPIO(4);
   7282 
   7283 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7284 		CSR_WRITE_FLUSH(sc);
   7285 		delay(10*1000);
   7286 
   7287 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7288 		CSR_WRITE_FLUSH(sc);
   7289 		delay(150);
   7290 #if 0
   7291 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7292 #endif
   7293 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7294 		break;
   7295 	case WM_T_82544:	/* reset 10000us */
   7296 	case WM_T_82540:
   7297 	case WM_T_82545:
   7298 	case WM_T_82545_3:
   7299 	case WM_T_82546:
   7300 	case WM_T_82546_3:
   7301 	case WM_T_82541:
   7302 	case WM_T_82541_2:
   7303 	case WM_T_82547:
   7304 	case WM_T_82547_2:
   7305 	case WM_T_82571:	/* reset 100us */
   7306 	case WM_T_82572:
   7307 	case WM_T_82573:
   7308 	case WM_T_82574:
   7309 	case WM_T_82575:
   7310 	case WM_T_82576:
   7311 	case WM_T_82580:
   7312 	case WM_T_I350:
   7313 	case WM_T_I354:
   7314 	case WM_T_I210:
   7315 	case WM_T_I211:
   7316 	case WM_T_82583:
   7317 	case WM_T_80003:
   7318 		/* generic reset */
   7319 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7320 		CSR_WRITE_FLUSH(sc);
   7321 		delay(20000);
   7322 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7323 		CSR_WRITE_FLUSH(sc);
   7324 		delay(20000);
   7325 
   7326 		if ((sc->sc_type == WM_T_82541)
   7327 		    || (sc->sc_type == WM_T_82541_2)
   7328 		    || (sc->sc_type == WM_T_82547)
   7329 		    || (sc->sc_type == WM_T_82547_2)) {
    7330 			/* Workarounds for igp are done in igp_reset() */
   7331 			/* XXX add code to set LED after phy reset */
   7332 		}
   7333 		break;
   7334 	case WM_T_ICH8:
   7335 	case WM_T_ICH9:
   7336 	case WM_T_ICH10:
   7337 	case WM_T_PCH:
   7338 	case WM_T_PCH2:
   7339 	case WM_T_PCH_LPT:
   7340 		/* generic reset */
   7341 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7342 		CSR_WRITE_FLUSH(sc);
   7343 		delay(100);
   7344 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7345 		CSR_WRITE_FLUSH(sc);
   7346 		delay(150);
   7347 		break;
   7348 	default:
   7349 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7350 		    __func__);
   7351 		break;
   7352 	}
   7353 
   7354 	/* release PHY semaphore */
   7355 	switch (sc->sc_type) {
   7356 	case WM_T_82571:
   7357 	case WM_T_82572:
   7358 	case WM_T_82573:
   7359 	case WM_T_82574:
   7360 	case WM_T_82583:
   7361 		 /* XXX should put sw semaphore, too */
   7362 		wm_put_swsm_semaphore(sc);
   7363 		break;
   7364 	case WM_T_82575:
   7365 	case WM_T_82576:
   7366 	case WM_T_82580:
   7367 	case WM_T_I350:
   7368 	case WM_T_I354:
   7369 	case WM_T_I210:
   7370 	case WM_T_I211:
   7371 	case WM_T_80003:
   7372 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7373 		break;
   7374 	case WM_T_ICH8:
   7375 	case WM_T_ICH9:
   7376 	case WM_T_ICH10:
   7377 	case WM_T_PCH:
   7378 	case WM_T_PCH2:
   7379 	case WM_T_PCH_LPT:
   7380 		wm_put_swfwhw_semaphore(sc);
   7381 		break;
    7382 	default:
    7383 		/* Nothing to do */
    7385 		break;
    7386 	}
   7387 
   7388 	/* get_cfg_done */
   7389 	wm_get_cfg_done(sc);
   7390 
   7391 	/* extra setup */
   7392 	switch (sc->sc_type) {
   7393 	case WM_T_82542_2_0:
   7394 	case WM_T_82542_2_1:
   7395 	case WM_T_82543:
   7396 	case WM_T_82544:
   7397 	case WM_T_82540:
   7398 	case WM_T_82545:
   7399 	case WM_T_82545_3:
   7400 	case WM_T_82546:
   7401 	case WM_T_82546_3:
   7402 	case WM_T_82541_2:
   7403 	case WM_T_82547_2:
   7404 	case WM_T_82571:
   7405 	case WM_T_82572:
   7406 	case WM_T_82573:
   7407 	case WM_T_82574:
   7408 	case WM_T_82575:
   7409 	case WM_T_82576:
   7410 	case WM_T_82580:
   7411 	case WM_T_I350:
   7412 	case WM_T_I354:
   7413 	case WM_T_I210:
   7414 	case WM_T_I211:
   7415 	case WM_T_82583:
   7416 	case WM_T_80003:
   7417 		/* null */
   7418 		break;
   7419 	case WM_T_82541:
   7420 	case WM_T_82547:
    7421 		/* XXX Configure activity LED after PHY reset */
   7422 		break;
   7423 	case WM_T_ICH8:
   7424 	case WM_T_ICH9:
   7425 	case WM_T_ICH10:
   7426 	case WM_T_PCH:
   7427 	case WM_T_PCH2:
   7428 	case WM_T_PCH_LPT:
    7429 		/* Allow time for h/w to get to a quiescent state after reset */
   7430 		delay(10*1000);
   7431 
   7432 		if (sc->sc_type == WM_T_PCH)
   7433 			wm_hv_phy_workaround_ich8lan(sc);
   7434 
   7435 		if (sc->sc_type == WM_T_PCH2)
   7436 			wm_lv_phy_workaround_ich8lan(sc);
   7437 
   7438 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7439 			/*
   7440 			 * dummy read to clear the phy wakeup bit after lcd
   7441 			 * reset
   7442 			 */
   7443 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7444 		}
   7445 
   7446 		/*
    7447 		 * XXX Configure the LCD with the extended configuration region
   7448 		 * in NVM
   7449 		 */
   7450 
   7451 		/* Configure the LCD with the OEM bits in NVM */
   7452 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   7453 		    || (sc->sc_type == WM_T_PCH_LPT)) {
   7454 			/*
   7455 			 * Disable LPLU.
   7456 			 * XXX It seems that 82567 has LPLU, too.
   7457 			 */
   7458 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
    7459 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   7460 			reg |= HV_OEM_BITS_ANEGNOW;
   7461 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   7462 		}
   7463 		break;
   7464 	default:
   7465 		panic("%s: unknown type\n", __func__);
   7466 		break;
   7467 	}
   7468 }
   7469 
   7470 /*
   7471  * wm_get_phy_id_82575:
   7472  *
   7473  * Return PHY ID. Return -1 if it failed.
   7474  */
   7475 static int
   7476 wm_get_phy_id_82575(struct wm_softc *sc)
   7477 {
   7478 	uint32_t reg;
   7479 	int phyid = -1;
   7480 
   7481 	/* XXX */
   7482 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   7483 		return -1;
   7484 
   7485 	if (wm_sgmii_uses_mdio(sc)) {
   7486 		switch (sc->sc_type) {
   7487 		case WM_T_82575:
   7488 		case WM_T_82576:
   7489 			reg = CSR_READ(sc, WMREG_MDIC);
   7490 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   7491 			break;
   7492 		case WM_T_82580:
   7493 		case WM_T_I350:
   7494 		case WM_T_I354:
   7495 		case WM_T_I210:
   7496 		case WM_T_I211:
   7497 			reg = CSR_READ(sc, WMREG_MDICNFG);
   7498 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   7499 			break;
   7500 		default:
   7501 			return -1;
   7502 		}
   7503 	}
   7504 
   7505 	return phyid;
   7506 }
   7507 
   7508 
   7509 /*
   7510  * wm_gmii_mediainit:
   7511  *
   7512  *	Initialize media for use on 1000BASE-T devices.
   7513  */
   7514 static void
   7515 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   7516 {
   7517 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7518 	struct mii_data *mii = &sc->sc_mii;
   7519 	uint32_t reg;
   7520 
   7521 	/* We have GMII. */
   7522 	sc->sc_flags |= WM_F_HAS_MII;
   7523 
   7524 	if (sc->sc_type == WM_T_80003)
    7525 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   7526 	else
   7527 		sc->sc_tipg = TIPG_1000T_DFLT;
   7528 
   7529 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   7530 	if ((sc->sc_type == WM_T_82580)
   7531 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   7532 	    || (sc->sc_type == WM_T_I211)) {
   7533 		reg = CSR_READ(sc, WMREG_PHPM);
   7534 		reg &= ~PHPM_GO_LINK_D;
   7535 		CSR_WRITE(sc, WMREG_PHPM, reg);
   7536 	}
   7537 
   7538 	/*
   7539 	 * Let the chip set speed/duplex on its own based on
   7540 	 * signals from the PHY.
   7541 	 * XXXbouyer - I'm not sure this is right for the 80003,
   7542 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   7543 	 */
   7544 	sc->sc_ctrl |= CTRL_SLU;
   7545 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7546 
   7547 	/* Initialize our media structures and probe the GMII. */
   7548 	mii->mii_ifp = ifp;
   7549 
   7550 	/*
   7551 	 * Determine the PHY access method.
   7552 	 *
    7553 	 *  For SGMII, use the SGMII-specific method.
   7554 	 *
   7555 	 *  For some devices, we can determine the PHY access method
   7556 	 * from sc_type.
   7557 	 *
    7558 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    7559 	 * access method from sc_type, so use the PCI product ID for some
    7560 	 * devices.
    7561 	 * For other ICH8 variants, try igp's method first.  If the PHY
    7562 	 * can't be detected, fall back to bm's method.
   7563 	 */
   7564 	switch (prodid) {
   7565 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   7566 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   7567 		/* 82577 */
   7568 		sc->sc_phytype = WMPHY_82577;
   7569 		break;
   7570 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   7571 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   7572 		/* 82578 */
   7573 		sc->sc_phytype = WMPHY_82578;
   7574 		break;
   7575 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   7576 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   7577 		/* 82579 */
   7578 		sc->sc_phytype = WMPHY_82579;
   7579 		break;
   7580 	case PCI_PRODUCT_INTEL_82801I_BM:
   7581 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   7582 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   7583 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   7584 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   7585 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   7586 		/* 82567 */
   7587 		sc->sc_phytype = WMPHY_BM;
   7588 		mii->mii_readreg = wm_gmii_bm_readreg;
   7589 		mii->mii_writereg = wm_gmii_bm_writereg;
   7590 		break;
   7591 	default:
   7592 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    7593 		    && !wm_sgmii_uses_mdio(sc)) {
   7594 			/* SGMII */
   7595 			mii->mii_readreg = wm_sgmii_readreg;
   7596 			mii->mii_writereg = wm_sgmii_writereg;
   7597 		} else if (sc->sc_type >= WM_T_80003) {
   7598 			/* 80003 */
   7599 			mii->mii_readreg = wm_gmii_i80003_readreg;
   7600 			mii->mii_writereg = wm_gmii_i80003_writereg;
   7601 		} else if (sc->sc_type >= WM_T_I210) {
   7602 			/* I210 and I211 */
   7603 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   7604 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   7605 		} else if (sc->sc_type >= WM_T_82580) {
   7606 			/* 82580, I350 and I354 */
   7607 			sc->sc_phytype = WMPHY_82580;
   7608 			mii->mii_readreg = wm_gmii_82580_readreg;
   7609 			mii->mii_writereg = wm_gmii_82580_writereg;
   7610 		} else if (sc->sc_type >= WM_T_82544) {
    7611 			/* 82544, 82540, 8254[1567], 8257[1234] and 82583 */
   7612 			mii->mii_readreg = wm_gmii_i82544_readreg;
   7613 			mii->mii_writereg = wm_gmii_i82544_writereg;
   7614 		} else {
   7615 			mii->mii_readreg = wm_gmii_i82543_readreg;
   7616 			mii->mii_writereg = wm_gmii_i82543_writereg;
   7617 		}
   7618 		break;
   7619 	}
   7620 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
   7621 		/* All PCH* use _hv_ */
   7622 		mii->mii_readreg = wm_gmii_hv_readreg;
   7623 		mii->mii_writereg = wm_gmii_hv_writereg;
   7624 	}
   7625 	mii->mii_statchg = wm_gmii_statchg;
   7626 
   7627 	wm_gmii_reset(sc);
   7628 
   7629 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   7630 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   7631 	    wm_gmii_mediastatus);
   7632 
   7633 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   7634 	    || (sc->sc_type == WM_T_82580)
   7635 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   7636 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   7637 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   7638 			/* Attach only one port */
   7639 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   7640 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   7641 		} else {
   7642 			int i, id;
   7643 			uint32_t ctrl_ext;
   7644 
   7645 			id = wm_get_phy_id_82575(sc);
   7646 			if (id != -1) {
   7647 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   7648 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   7649 			}
   7650 			if ((id == -1)
   7651 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   7652 				/* Power on sgmii phy if it is disabled */
   7653 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   7654 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    7655 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   7656 				CSR_WRITE_FLUSH(sc);
   7657 				delay(300*1000); /* XXX too long */
   7658 
    7659 				/* Try PHY addresses 1 through 7 */
   7660 				for (i = 1; i < 8; i++)
   7661 					mii_attach(sc->sc_dev, &sc->sc_mii,
   7662 					    0xffffffff, i, MII_OFFSET_ANY,
   7663 					    MIIF_DOPAUSE);
   7664 
    7665 				/* Restore previous SFP cage power state */
   7666 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   7667 			}
   7668 		}
   7669 	} else {
   7670 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   7671 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   7672 	}
   7673 
   7674 	/*
   7675 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   7676 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   7677 	 */
   7678 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   7679 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   7680 		wm_set_mdio_slow_mode_hv(sc);
   7681 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   7682 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   7683 	}
   7684 
   7685 	/*
   7686 	 * (For ICH8 variants)
   7687 	 * If PHY detection failed, use BM's r/w function and retry.
   7688 	 */
   7689 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   7690 		/* if failed, retry with *_bm_* */
   7691 		mii->mii_readreg = wm_gmii_bm_readreg;
   7692 		mii->mii_writereg = wm_gmii_bm_writereg;
   7693 
   7694 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   7695 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   7696 	}
   7697 
   7698 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    7699 		/* No PHY was found */
   7700 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   7701 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   7702 		sc->sc_phytype = WMPHY_NONE;
   7703 	} else {
   7704 		/*
   7705 		 * PHY Found!
   7706 		 * Check PHY type.
   7707 		 */
   7708 		uint32_t model;
   7709 		struct mii_softc *child;
   7710 
   7711 		child = LIST_FIRST(&mii->mii_phys);
   7712 		if (device_is_a(child->mii_dev, "igphy")) {
   7713 			struct igphy_softc *isc = (struct igphy_softc *)child;
   7714 
   7715 			model = isc->sc_mii.mii_mpd_model;
   7716 			if (model == MII_MODEL_yyINTEL_I82566)
   7717 				sc->sc_phytype = WMPHY_IGP_3;
   7718 		}
   7719 
   7720 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   7721 	}
   7722 }
   7723 
   7724 /*
   7725  * wm_gmii_mediachange:	[ifmedia interface function]
   7726  *
   7727  *	Set hardware to newly-selected media on a 1000BASE-T device.
   7728  */
   7729 static int
   7730 wm_gmii_mediachange(struct ifnet *ifp)
   7731 {
   7732 	struct wm_softc *sc = ifp->if_softc;
   7733 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7734 	int rc;
   7735 
   7736 	if ((ifp->if_flags & IFF_UP) == 0)
   7737 		return 0;
   7738 
   7739 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7740 	sc->sc_ctrl |= CTRL_SLU;
   7741 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   7742 	    || (sc->sc_type > WM_T_82543)) {
   7743 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   7744 	} else {
   7745 		sc->sc_ctrl &= ~CTRL_ASDE;
   7746 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7747 		if (ife->ifm_media & IFM_FDX)
   7748 			sc->sc_ctrl |= CTRL_FD;
   7749 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   7750 		case IFM_10_T:
   7751 			sc->sc_ctrl |= CTRL_SPEED_10;
   7752 			break;
   7753 		case IFM_100_TX:
   7754 			sc->sc_ctrl |= CTRL_SPEED_100;
   7755 			break;
   7756 		case IFM_1000_T:
   7757 			sc->sc_ctrl |= CTRL_SPEED_1000;
   7758 			break;
   7759 		default:
   7760 			panic("wm_gmii_mediachange: bad media 0x%x",
   7761 			    ife->ifm_media);
   7762 		}
   7763 	}
   7764 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7765 	if (sc->sc_type <= WM_T_82543)
   7766 		wm_gmii_reset(sc);
   7767 
   7768 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   7769 		return 0;
   7770 	return rc;
   7771 }
   7772 
   7773 /*
   7774  * wm_gmii_mediastatus:	[ifmedia interface function]
   7775  *
   7776  *	Get the current interface media status on a 1000BASE-T device.
   7777  */
   7778 static void
   7779 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   7780 {
   7781 	struct wm_softc *sc = ifp->if_softc;
   7782 
   7783 	ether_mediastatus(ifp, ifmr);
   7784 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   7785 	    | sc->sc_flowflags;
   7786 }
   7787 
   7788 #define	MDI_IO		CTRL_SWDPIN(2)
   7789 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   7790 #define	MDI_CLK		CTRL_SWDPIN(3)
   7791 
   7792 static void
   7793 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   7794 {
   7795 	uint32_t i, v;
   7796 
   7797 	v = CSR_READ(sc, WMREG_CTRL);
   7798 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   7799 	v |= MDI_DIR | CTRL_SWDPIO(3);
   7800 
   7801 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   7802 		if (data & i)
   7803 			v |= MDI_IO;
   7804 		else
   7805 			v &= ~MDI_IO;
   7806 		CSR_WRITE(sc, WMREG_CTRL, v);
   7807 		CSR_WRITE_FLUSH(sc);
   7808 		delay(10);
   7809 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7810 		CSR_WRITE_FLUSH(sc);
   7811 		delay(10);
   7812 		CSR_WRITE(sc, WMREG_CTRL, v);
   7813 		CSR_WRITE_FLUSH(sc);
   7814 		delay(10);
   7815 	}
   7816 }
   7817 
   7818 static uint32_t
   7819 wm_i82543_mii_recvbits(struct wm_softc *sc)
   7820 {
   7821 	uint32_t v, i, data = 0;
   7822 
   7823 	v = CSR_READ(sc, WMREG_CTRL);
   7824 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   7825 	v |= CTRL_SWDPIO(3);
   7826 
   7827 	CSR_WRITE(sc, WMREG_CTRL, v);
   7828 	CSR_WRITE_FLUSH(sc);
   7829 	delay(10);
   7830 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7831 	CSR_WRITE_FLUSH(sc);
   7832 	delay(10);
   7833 	CSR_WRITE(sc, WMREG_CTRL, v);
   7834 	CSR_WRITE_FLUSH(sc);
   7835 	delay(10);
   7836 
   7837 	for (i = 0; i < 16; i++) {
   7838 		data <<= 1;
   7839 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7840 		CSR_WRITE_FLUSH(sc);
   7841 		delay(10);
   7842 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   7843 			data |= 1;
   7844 		CSR_WRITE(sc, WMREG_CTRL, v);
   7845 		CSR_WRITE_FLUSH(sc);
   7846 		delay(10);
   7847 	}
   7848 
   7849 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7850 	CSR_WRITE_FLUSH(sc);
   7851 	delay(10);
   7852 	CSR_WRITE(sc, WMREG_CTRL, v);
   7853 	CSR_WRITE_FLUSH(sc);
   7854 	delay(10);
   7855 
   7856 	return data;
   7857 }
   7858 
   7859 #undef MDI_IO
   7860 #undef MDI_DIR
   7861 #undef MDI_CLK
   7862 
   7863 /*
   7864  * wm_gmii_i82543_readreg:	[mii interface function]
   7865  *
   7866  *	Read a PHY register on the GMII (i82543 version).
   7867  */
   7868 static int
   7869 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   7870 {
   7871 	struct wm_softc *sc = device_private(self);
   7872 	int rv;
   7873 
   7874 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   7875 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   7876 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   7877 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   7878 
   7879 	DPRINTF(WM_DEBUG_GMII,
   7880 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   7881 	    device_xname(sc->sc_dev), phy, reg, rv));
   7882 
   7883 	return rv;
   7884 }
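         /*
          * Sketch of the clause 22 read frame assembled above (assumed
          * layout, matching the shift counts in the code): after 32
          * preamble bits of all ones, a 14-bit word is shifted out MSB
          * first:
          *
          *	[13:12]	start (MII_COMMAND_START)
          *	[11:10]	opcode (MII_COMMAND_READ)
          *	[9:5]	PHY address
          *	[4:0]	register address
          *
          * wm_i82543_mii_recvbits() then clocks the turnaround and the
          * 16 data bits back in.
          */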
   7885 
   7886 /*
   7887  * wm_gmii_i82543_writereg:	[mii interface function]
   7888  *
   7889  *	Write a PHY register on the GMII (i82543 version).
   7890  */
   7891 static void
   7892 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   7893 {
   7894 	struct wm_softc *sc = device_private(self);
   7895 
   7896 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   7897 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   7898 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   7899 	    (MII_COMMAND_START << 30), 32);
   7900 }
   7901 
   7902 /*
   7903  * wm_gmii_i82544_readreg:	[mii interface function]
   7904  *
   7905  *	Read a PHY register on the GMII.
   7906  */
   7907 static int
   7908 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   7909 {
   7910 	struct wm_softc *sc = device_private(self);
   7911 	uint32_t mdic = 0;
   7912 	int i, rv;
   7913 
   7914 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   7915 	    MDIC_REGADD(reg));
   7916 
   7917 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   7918 		mdic = CSR_READ(sc, WMREG_MDIC);
   7919 		if (mdic & MDIC_READY)
   7920 			break;
   7921 		delay(50);
   7922 	}
   7923 
   7924 	if ((mdic & MDIC_READY) == 0) {
   7925 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   7926 		    device_xname(sc->sc_dev), phy, reg);
   7927 		rv = 0;
   7928 	} else if (mdic & MDIC_E) {
   7929 #if 0 /* This is normal if no PHY is present. */
   7930 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   7931 		    device_xname(sc->sc_dev), phy, reg);
   7932 #endif
   7933 		rv = 0;
   7934 	} else {
   7935 		rv = MDIC_DATA(mdic);
   7936 		if (rv == 0xffff)
   7937 			rv = 0;
   7938 	}
   7939 
   7940 	return rv;
   7941 }
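         /*
          * Illustrative MDIC usage (field layout assumed from the macros
          * used above): reading PHY 1, register 2 (PHY ID1) amounts to
          *
          *	CSR_WRITE(sc, WMREG_MDIC,
          *	    MDIC_OP_READ | MDIC_PHYADD(1) | MDIC_REGADD(2));
          *
          * followed by polling for MDIC_READY and extracting the 16-bit
          * result with MDIC_DATA().  The loop above allows up to
          * WM_GEN_POLL_TIMEOUT * 3 polls at 50us intervals before
          * giving up.
          */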
   7942 
   7943 /*
   7944  * wm_gmii_i82544_writereg:	[mii interface function]
   7945  *
   7946  *	Write a PHY register on the GMII.
   7947  */
   7948 static void
   7949 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   7950 {
   7951 	struct wm_softc *sc = device_private(self);
   7952 	uint32_t mdic = 0;
   7953 	int i;
   7954 
   7955 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   7956 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   7957 
   7958 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   7959 		mdic = CSR_READ(sc, WMREG_MDIC);
   7960 		if (mdic & MDIC_READY)
   7961 			break;
   7962 		delay(50);
   7963 	}
   7964 
   7965 	if ((mdic & MDIC_READY) == 0)
   7966 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   7967 		    device_xname(sc->sc_dev), phy, reg);
   7968 	else if (mdic & MDIC_E)
   7969 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   7970 		    device_xname(sc->sc_dev), phy, reg);
   7971 }
   7972 
   7973 /*
   7974  * wm_gmii_i80003_readreg:	[mii interface function]
   7975  *
    7976  *	Read a PHY register on the Kumeran bus.
    7977  * This could be handled by the PHY layer if we didn't have to lock the
    7978  * resource ...
   7979  */
   7980 static int
   7981 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   7982 {
   7983 	struct wm_softc *sc = device_private(self);
   7984 	int sem;
   7985 	int rv;
   7986 
   7987 	if (phy != 1) /* only one PHY on kumeran bus */
   7988 		return 0;
   7989 
   7990 	sem = swfwphysem[sc->sc_funcid];
   7991 	if (wm_get_swfw_semaphore(sc, sem)) {
   7992 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7993 		    __func__);
   7994 		return 0;
   7995 	}
   7996 
   7997 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   7998 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   7999 		    reg >> GG82563_PAGE_SHIFT);
   8000 	} else {
   8001 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8002 		    reg >> GG82563_PAGE_SHIFT);
   8003 	}
    8004 	/* Wait 200us more to work around a bug in the MDIC ready bit */
   8005 	delay(200);
   8006 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8007 	delay(200);
   8008 
   8009 	wm_put_swfw_semaphore(sc, sem);
   8010 	return rv;
   8011 }
   8012 
   8013 /*
   8014  * wm_gmii_i80003_writereg:	[mii interface function]
   8015  *
    8016  *	Write a PHY register on the Kumeran bus.
    8017  * This could be handled by the PHY layer if we didn't have to lock the
    8018  * resource ...
   8019  */
   8020 static void
   8021 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8022 {
   8023 	struct wm_softc *sc = device_private(self);
   8024 	int sem;
   8025 
   8026 	if (phy != 1) /* only one PHY on kumeran bus */
   8027 		return;
   8028 
   8029 	sem = swfwphysem[sc->sc_funcid];
   8030 	if (wm_get_swfw_semaphore(sc, sem)) {
   8031 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8032 		    __func__);
   8033 		return;
   8034 	}
   8035 
   8036 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8037 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8038 		    reg >> GG82563_PAGE_SHIFT);
   8039 	} else {
   8040 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8041 		    reg >> GG82563_PAGE_SHIFT);
   8042 	}
    8043 	/* Wait 200us more to work around a bug in the MDIC ready bit */
   8044 	delay(200);
   8045 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8046 	delay(200);
   8047 
   8048 	wm_put_swfw_semaphore(sc, sem);
   8049 }
   8050 
   8051 /*
   8052  * wm_gmii_bm_readreg:	[mii interface function]
   8053  *
    8054  *	Read a PHY register on the BM PHY (ICH8/9/10, PCH variants).
    8055  * This could be handled by the PHY layer if we didn't have to lock the
    8056  * resource ...
   8057  */
   8058 static int
   8059 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8060 {
   8061 	struct wm_softc *sc = device_private(self);
   8062 	int sem;
   8063 	int rv;
   8064 
   8065 	sem = swfwphysem[sc->sc_funcid];
   8066 	if (wm_get_swfw_semaphore(sc, sem)) {
   8067 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8068 		    __func__);
   8069 		return 0;
   8070 	}
   8071 
   8072 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8073 		if (phy == 1)
   8074 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8075 			    reg);
   8076 		else
   8077 			wm_gmii_i82544_writereg(self, phy,
   8078 			    GG82563_PHY_PAGE_SELECT,
   8079 			    reg >> GG82563_PAGE_SHIFT);
   8080 	}
   8081 
   8082 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8083 	wm_put_swfw_semaphore(sc, sem);
   8084 	return rv;
   8085 }
   8086 
   8087 /*
   8088  * wm_gmii_bm_writereg:	[mii interface function]
   8089  *
    8090  *	Write a PHY register on the BM PHY.
    8091  * This could be handled by the PHY layer if we didn't have to lock the
    8092  * resource ...
   8093  */
   8094 static void
   8095 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8096 {
   8097 	struct wm_softc *sc = device_private(self);
   8098 	int sem;
   8099 
   8100 	sem = swfwphysem[sc->sc_funcid];
   8101 	if (wm_get_swfw_semaphore(sc, sem)) {
   8102 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8103 		    __func__);
   8104 		return;
   8105 	}
   8106 
   8107 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8108 		if (phy == 1)
   8109 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8110 			    reg);
   8111 		else
   8112 			wm_gmii_i82544_writereg(self, phy,
   8113 			    GG82563_PHY_PAGE_SELECT,
   8114 			    reg >> GG82563_PAGE_SHIFT);
   8115 	}
   8116 
   8117 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8118 	wm_put_swfw_semaphore(sc, sem);
   8119 }
   8120 
   8121 static void
   8122 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8123 {
   8124 	struct wm_softc *sc = device_private(self);
   8125 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8126 	uint16_t wuce;
   8127 
   8128 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8129 	if (sc->sc_type == WM_T_PCH) {
    8130 		/* XXX the e1000 driver does nothing here... why? */
   8131 	}
   8132 
   8133 	/* Set page 769 */
   8134 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8135 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8136 
   8137 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8138 
   8139 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8140 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8141 	    wuce | BM_WUC_ENABLE_BIT);
   8142 
   8143 	/* Select page 800 */
   8144 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8145 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8146 
   8147 	/* Write page 800 */
   8148 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8149 
   8150 	if (rd)
   8151 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8152 	else
   8153 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8154 
   8155 	/* Set page 769 */
   8156 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8157 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8158 
   8159 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8160 }
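
         /*
          * A sketch of the access sequence implemented above:
          *
          *	1. Select page 769 (BM_WUC_ENABLE_PAGE), set BM_WUC_ENABLE_BIT
          *	   and clear BM_WUC_HOST_WU_BIT so that page 800 becomes
          *	   accessible.
          *	2. Select page 800 (BM_WUC_PAGE) and write the target register
          *	   number to BM_WUC_ADDRESS_OPCODE.
          *	3. Move the data through BM_WUC_DATA_OPCODE.
          *	4. Select page 769 again and restore the saved enable bits.
          */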
   8161 
   8162 /*
   8163  * wm_gmii_hv_readreg:	[mii interface function]
   8164  *
    8165  *	Read a PHY register on the HV (82577 and newer) PHY.
    8166  * This could be handled by the PHY layer if we didn't have to lock the
    8167  * resource ...
   8168  */
   8169 static int
   8170 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8171 {
   8172 	struct wm_softc *sc = device_private(self);
   8173 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8174 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8175 	uint16_t val;
   8176 	int rv;
   8177 
   8178 	if (wm_get_swfwhw_semaphore(sc)) {
   8179 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8180 		    __func__);
   8181 		return 0;
   8182 	}
   8183 
    8184 	/* XXX Work around MDIO access failure while cable is disconnected */
   8185 	if (sc->sc_phytype == WMPHY_82577) {
   8186 		/* XXX must write */
   8187 	}
   8188 
   8189 	/* Page 800 works differently than the rest so it has its own func */
   8190 	if (page == BM_WUC_PAGE) {
   8191 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
         		wm_put_swfwhw_semaphore(sc); /* don't leak the semaphore */
    8192 		return val;
   8193 	}
   8194 
    8195 	/*
    8196 	 * Pages lower than 768 work differently than the rest, so they
    8197 	 * would need their own function (not implemented yet).
    8198 	 */
    8199 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8200 		printf("gmii_hv_readreg!!!\n");
         		wm_put_swfwhw_semaphore(sc); /* don't leak the semaphore */
    8201 		return 0;
    8202 	}
   8203 
   8204 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8205 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8206 		    page << BME1000_PAGE_SHIFT);
   8207 	}
   8208 
   8209 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8210 	wm_put_swfwhw_semaphore(sc);
   8211 	return rv;
   8212 }
   8213 
   8214 /*
   8215  * wm_gmii_hv_writereg:	[mii interface function]
   8216  *
    8217  *	Write a PHY register on the HV (82577 and newer) PHY.
    8218  * This could be handled by the PHY layer if we didn't have to lock the
    8219  * resource ...
   8220  */
   8221 static void
   8222 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8223 {
   8224 	struct wm_softc *sc = device_private(self);
   8225 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8226 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8227 
   8228 	if (wm_get_swfwhw_semaphore(sc)) {
   8229 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8230 		    __func__);
   8231 		return;
   8232 	}
   8233 
    8234 	/* XXX Work around MDIO access failure while cable is disconnected */
   8235 
   8236 	/* Page 800 works differently than the rest so it has its own func */
   8237 	if (page == BM_WUC_PAGE) {
   8238 		uint16_t tmp;
   8239 
   8240 		tmp = val;
   8241 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
         		wm_put_swfwhw_semaphore(sc); /* don't leak the semaphore */
    8242 		return;
   8243 	}
   8244 
    8245 	/*
    8246 	 * Pages lower than 768 work differently than the rest, so they
    8247 	 * would need their own function (not implemented yet).
    8248 	 */
    8249 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8250 		printf("gmii_hv_writereg!!!\n");
         		wm_put_swfwhw_semaphore(sc); /* don't leak the semaphore */
    8251 		return;
    8252 	}
   8253 
   8254 	/*
    8255 	 * XXX Work around MDIO accesses being disabled after entering IEEE
   8256 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8257 	 */
   8258 
   8259 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8260 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8261 		    page << BME1000_PAGE_SHIFT);
   8262 	}
   8263 
   8264 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8265 	wm_put_swfwhw_semaphore(sc);
   8266 }
   8267 
   8268 /*
   8269  * wm_gmii_82580_readreg:	[mii interface function]
   8270  *
   8271  *	Read a PHY register on the 82580 and I350.
   8272  * This could be handled by the PHY layer if we didn't have to lock the
    8273  * resource ...
   8274  */
   8275 static int
   8276 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8277 {
   8278 	struct wm_softc *sc = device_private(self);
   8279 	int sem;
   8280 	int rv;
   8281 
   8282 	sem = swfwphysem[sc->sc_funcid];
   8283 	if (wm_get_swfw_semaphore(sc, sem)) {
   8284 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8285 		    __func__);
   8286 		return 0;
   8287 	}
   8288 
   8289 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8290 
   8291 	wm_put_swfw_semaphore(sc, sem);
   8292 	return rv;
   8293 }
   8294 
   8295 /*
   8296  * wm_gmii_82580_writereg:	[mii interface function]
   8297  *
   8298  *	Write a PHY register on the 82580 and I350.
   8299  * This could be handled by the PHY layer if we didn't have to lock the
    8300  * resource ...
   8301  */
   8302 static void
   8303 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8304 {
   8305 	struct wm_softc *sc = device_private(self);
   8306 	int sem;
   8307 
   8308 	sem = swfwphysem[sc->sc_funcid];
   8309 	if (wm_get_swfw_semaphore(sc, sem)) {
   8310 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8311 		    __func__);
   8312 		return;
   8313 	}
   8314 
   8315 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8316 
   8317 	wm_put_swfw_semaphore(sc, sem);
   8318 }
   8319 
   8320 /*
   8321  * wm_gmii_gs40g_readreg:	[mii interface function]
   8322  *
    8323  *	Read a PHY register on the I210 and I211.
    8324  * This could be handled by the PHY layer if we didn't have to lock the
    8325  * resource ...
   8326  */
   8327 static int
   8328 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8329 {
   8330 	struct wm_softc *sc = device_private(self);
   8331 	int sem;
   8332 	int page, offset;
   8333 	int rv;
   8334 
   8335 	/* Acquire semaphore */
   8336 	sem = swfwphysem[sc->sc_funcid];
   8337 	if (wm_get_swfw_semaphore(sc, sem)) {
   8338 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8339 		    __func__);
   8340 		return 0;
   8341 	}
   8342 
   8343 	/* Page select */
   8344 	page = reg >> GS40G_PAGE_SHIFT;
   8345 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8346 
   8347 	/* Read reg */
   8348 	offset = reg & GS40G_OFFSET_MASK;
   8349 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8350 
   8351 	wm_put_swfw_semaphore(sc, sem);
   8352 	return rv;
   8353 }
   8354 
   8355 /*
   8356  * wm_gmii_gs40g_writereg:	[mii interface function]
   8357  *
   8358  *	Write a PHY register on the I210 and I211.
   8359  * This could be handled by the PHY layer if we didn't have to lock the
    8360  * resource ...
   8361  */
   8362 static void
   8363 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8364 {
   8365 	struct wm_softc *sc = device_private(self);
   8366 	int sem;
   8367 	int page, offset;
   8368 
   8369 	/* Acquire semaphore */
   8370 	sem = swfwphysem[sc->sc_funcid];
   8371 	if (wm_get_swfw_semaphore(sc, sem)) {
   8372 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8373 		    __func__);
   8374 		return;
   8375 	}
   8376 
   8377 	/* Page select */
   8378 	page = reg >> GS40G_PAGE_SHIFT;
   8379 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8380 
   8381 	/* Write reg */
   8382 	offset = reg & GS40G_OFFSET_MASK;
   8383 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8384 
   8385 	/* Release semaphore */
   8386 	wm_put_swfw_semaphore(sc, sem);
   8387 }
   8388 
   8389 /*
   8390  * wm_gmii_statchg:	[mii interface function]
   8391  *
   8392  *	Callback from MII layer when media changes.
   8393  */
   8394 static void
   8395 wm_gmii_statchg(struct ifnet *ifp)
   8396 {
   8397 	struct wm_softc *sc = ifp->if_softc;
   8398 	struct mii_data *mii = &sc->sc_mii;
   8399 
   8400 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8401 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8402 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8403 
   8404 	/*
   8405 	 * Get flow control negotiation result.
   8406 	 */
   8407 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8408 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8409 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8410 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8411 	}
   8412 
   8413 	if (sc->sc_flowflags & IFM_FLOW) {
   8414 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8415 			sc->sc_ctrl |= CTRL_TFCE;
   8416 			sc->sc_fcrtl |= FCRTL_XONE;
   8417 		}
   8418 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8419 			sc->sc_ctrl |= CTRL_RFCE;
   8420 	}
   8421 
   8422 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8423 		DPRINTF(WM_DEBUG_LINK,
   8424 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8425 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8426 	} else {
   8427 		DPRINTF(WM_DEBUG_LINK,
   8428 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8429 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8430 	}
   8431 
   8432 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8433 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8434 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8435 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8436 	if (sc->sc_type == WM_T_80003) {
   8437 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8438 		case IFM_1000_T:
   8439 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8440 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8441 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8442 			break;
   8443 		default:
   8444 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8445 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8446 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8447 			break;
   8448 		}
   8449 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8450 	}
   8451 }
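
         /*
          * Summary of the mapping applied above (a reading of the code, not
          * of the datasheet):
          *
          *	IFM_ETH_TXPAUSE -> CTRL_TFCE and FCRTL_XONE (we send PAUSE)
          *	IFM_ETH_RXPAUSE -> CTRL_RFCE (we honor received PAUSE)
          *	full duplex     -> TCTL_COLD(TX_COLLISION_DISTANCE_FDX)
          *	half duplex     -> TCTL_COLD(TX_COLLISION_DISTANCE_HDX)
          *
          * On the 80003, the Kumeran half-duplex control and TIPG are also
          * switched between the 1000BASE-T and 10/100 defaults.
          */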
   8452 
   8453 /*
   8454  * wm_kmrn_readreg:
   8455  *
   8456  *	Read a kumeran register
   8457  */
   8458 static int
   8459 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8460 {
   8461 	int rv;
   8462 
   8463 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8464 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8465 			aprint_error_dev(sc->sc_dev,
   8466 			    "%s: failed to get semaphore\n", __func__);
   8467 			return 0;
   8468 		}
   8469 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8470 		if (wm_get_swfwhw_semaphore(sc)) {
   8471 			aprint_error_dev(sc->sc_dev,
   8472 			    "%s: failed to get semaphore\n", __func__);
   8473 			return 0;
   8474 		}
   8475 	}
   8476 
   8477 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8478 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8479 	    KUMCTRLSTA_REN);
   8480 	CSR_WRITE_FLUSH(sc);
   8481 	delay(2);
   8482 
   8483 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   8484 
   8485 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8486 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8487 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8488 		wm_put_swfwhw_semaphore(sc);
   8489 
   8490 	return rv;
   8491 }
   8492 
   8493 /*
   8494  * wm_kmrn_writereg:
   8495  *
   8496  *	Write a kumeran register
   8497  */
   8498 static void
   8499 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   8500 {
   8501 
   8502 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8503 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8504 			aprint_error_dev(sc->sc_dev,
   8505 			    "%s: failed to get semaphore\n", __func__);
   8506 			return;
   8507 		}
   8508 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8509 		if (wm_get_swfwhw_semaphore(sc)) {
   8510 			aprint_error_dev(sc->sc_dev,
   8511 			    "%s: failed to get semaphore\n", __func__);
   8512 			return;
   8513 		}
   8514 	}
   8515 
   8516 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8517 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8518 	    (val & KUMCTRLSTA_MASK));
   8519 
   8520 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8521 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8522 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8523 		wm_put_swfwhw_semaphore(sc);
   8524 }
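
         /*
          * KUMCTRLSTA access pattern used by the two functions above: the
          * register offset goes into the field masked by KUMCTRLSTA_OFFSET
          * (shifted by KUMCTRLSTA_OFFSET_SHIFT) and the 16-bit data travels
          * in the bits covered by KUMCTRLSTA_MASK; a read is started by also
          * setting KUMCTRLSTA_REN.  A typical use (a sketch built only from
          * helpers and constants already in this file):
          *
          *	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
          *	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
          */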
   8525 
   8526 /* SGMII related */
   8527 
   8528 /*
   8529  * wm_sgmii_uses_mdio
   8530  *
   8531  * Check whether the transaction is to the internal PHY or the external
   8532  * MDIO interface. Return true if it's MDIO.
   8533  */
   8534 static bool
   8535 wm_sgmii_uses_mdio(struct wm_softc *sc)
   8536 {
   8537 	uint32_t reg;
   8538 	bool ismdio = false;
   8539 
   8540 	switch (sc->sc_type) {
   8541 	case WM_T_82575:
   8542 	case WM_T_82576:
   8543 		reg = CSR_READ(sc, WMREG_MDIC);
   8544 		ismdio = ((reg & MDIC_DEST) != 0);
   8545 		break;
   8546 	case WM_T_82580:
   8547 	case WM_T_I350:
   8548 	case WM_T_I354:
   8549 	case WM_T_I210:
   8550 	case WM_T_I211:
   8551 		reg = CSR_READ(sc, WMREG_MDICNFG);
   8552 		ismdio = ((reg & MDICNFG_DEST) != 0);
   8553 		break;
   8554 	default:
   8555 		break;
   8556 	}
   8557 
   8558 	return ismdio;
   8559 }
   8560 
   8561 /*
   8562  * wm_sgmii_readreg:	[mii interface function]
   8563  *
    8564  *	Read a PHY register on the SGMII.
    8565  * This could be handled by the PHY layer if we didn't have to lock the
    8566  * resource ...
   8567  */
   8568 static int
   8569 wm_sgmii_readreg(device_t self, int phy, int reg)
   8570 {
   8571 	struct wm_softc *sc = device_private(self);
   8572 	uint32_t i2ccmd;
   8573 	int i, rv;
   8574 
   8575 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8576 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8577 		    __func__);
   8578 		return 0;
   8579 	}
   8580 
   8581 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8582 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8583 	    | I2CCMD_OPCODE_READ;
   8584 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8585 
   8586 	/* Poll the ready bit */
   8587 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8588 		delay(50);
   8589 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8590 		if (i2ccmd & I2CCMD_READY)
   8591 			break;
   8592 	}
   8593 	if ((i2ccmd & I2CCMD_READY) == 0)
   8594 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   8595 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   8596 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   8597 
   8598 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   8599 
   8600 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   8601 	return rv;
   8602 }
   8603 
   8604 /*
   8605  * wm_sgmii_writereg:	[mii interface function]
   8606  *
   8607  *	Write a PHY register on the SGMII.
   8608  * This could be handled by the PHY layer if we didn't have to lock the
    8609  * resource ...
   8610  */
   8611 static void
   8612 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   8613 {
   8614 	struct wm_softc *sc = device_private(self);
   8615 	uint32_t i2ccmd;
   8616 	int i;
   8617 	int val_swapped;
   8618 
   8619 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8620 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8621 		    __func__);
   8622 		return;
   8623 	}
   8624 	/* Swap the data bytes for the I2C interface */
   8625 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   8626 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8627 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8628 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   8629 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8630 
   8631 	/* Poll the ready bit */
   8632 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8633 		delay(50);
   8634 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8635 		if (i2ccmd & I2CCMD_READY)
   8636 			break;
   8637 	}
   8638 	if ((i2ccmd & I2CCMD_READY) == 0)
   8639 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   8640 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   8641 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   8642 
    8643 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   8644 }
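
         /*
          * Byte-order example for the I2CCMD data field, following directly
          * from the swap above: a register value of 0x1234 is placed in the
          * command as 0x3412, and wm_sgmii_readreg() applies the inverse
          * swap on the way back.
          */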
   8645 
   8646 /* TBI related */
   8647 
   8648 /*
   8649  * wm_tbi_mediainit:
   8650  *
   8651  *	Initialize media for use on 1000BASE-X devices.
   8652  */
   8653 static void
   8654 wm_tbi_mediainit(struct wm_softc *sc)
   8655 {
   8656 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8657 	const char *sep = "";
   8658 
   8659 	if (sc->sc_type < WM_T_82543)
   8660 		sc->sc_tipg = TIPG_WM_DFLT;
   8661 	else
   8662 		sc->sc_tipg = TIPG_LG_DFLT;
   8663 
   8664 	sc->sc_tbi_serdes_anegticks = 5;
   8665 
   8666 	/* Initialize our media structures */
   8667 	sc->sc_mii.mii_ifp = ifp;
   8668 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8669 
   8670 	if ((sc->sc_type >= WM_T_82575)
   8671 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   8672 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   8673 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   8674 	else
   8675 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   8676 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   8677 
   8678 	/*
   8679 	 * SWD Pins:
   8680 	 *
   8681 	 *	0 = Link LED (output)
   8682 	 *	1 = Loss Of Signal (input)
   8683 	 */
   8684 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   8685 
   8686 	/* XXX Perhaps this is only for TBI */
   8687 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   8688 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   8689 
   8690 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8691 		sc->sc_ctrl &= ~CTRL_LRST;
   8692 
   8693 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8694 
   8695 #define	ADD(ss, mm, dd)							\
   8696 do {									\
   8697 	aprint_normal("%s%s", sep, ss);					\
   8698 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   8699 	sep = ", ";							\
   8700 } while (/*CONSTCOND*/0)
   8701 
   8702 	aprint_normal_dev(sc->sc_dev, "");
   8703 
   8704 	/* Only 82545 is LX */
   8705 	if (sc->sc_type == WM_T_82545) {
   8706 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   8707 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
   8708 	} else {
   8709 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   8710 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   8711 	}
   8712 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   8713 	aprint_normal("\n");
   8714 
   8715 #undef ADD
   8716 
   8717 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   8718 }
   8719 
   8720 /*
   8721  * wm_tbi_mediachange:	[ifmedia interface function]
   8722  *
   8723  *	Set hardware to newly-selected media on a 1000BASE-X device.
   8724  */
   8725 static int
   8726 wm_tbi_mediachange(struct ifnet *ifp)
   8727 {
   8728 	struct wm_softc *sc = ifp->if_softc;
   8729 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8730 	uint32_t status;
   8731 	int i;
   8732 
   8733 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   8734 		/* XXX need some work for >= 82571 and < 82575 */
   8735 		if (sc->sc_type < WM_T_82575)
   8736 			return 0;
   8737 	}
   8738 
   8739 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   8740 	    || (sc->sc_type >= WM_T_82575))
   8741 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   8742 
   8743 	sc->sc_ctrl &= ~CTRL_LRST;
   8744 	sc->sc_txcw = TXCW_ANE;
   8745 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8746 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   8747 	else if (ife->ifm_media & IFM_FDX)
   8748 		sc->sc_txcw |= TXCW_FD;
   8749 	else
   8750 		sc->sc_txcw |= TXCW_HD;
   8751 
   8752 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   8753 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   8754 
   8755 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   8756 		    device_xname(sc->sc_dev), sc->sc_txcw));
   8757 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   8758 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8759 	CSR_WRITE_FLUSH(sc);
   8760 	delay(1000);
   8761 
   8762 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   8763 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   8764 
   8765 	/*
    8766 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    8767 	 * if the optics detect a signal, 0 if they don't.
   8768 	 */
   8769 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   8770 		/* Have signal; wait for the link to come up. */
   8771 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   8772 			delay(10000);
   8773 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   8774 				break;
   8775 		}
   8776 
   8777 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   8778 			    device_xname(sc->sc_dev),i));
   8779 
   8780 		status = CSR_READ(sc, WMREG_STATUS);
   8781 		DPRINTF(WM_DEBUG_LINK,
   8782 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   8783 			device_xname(sc->sc_dev),status, STATUS_LU));
   8784 		if (status & STATUS_LU) {
   8785 			/* Link is up. */
   8786 			DPRINTF(WM_DEBUG_LINK,
   8787 			    ("%s: LINK: set media -> link up %s\n",
   8788 			    device_xname(sc->sc_dev),
   8789 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8790 
   8791 			/*
    8792 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    8793 			 * automatically, so we re-read CTRL into sc->sc_ctrl.
   8794 			 */
   8795 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8796 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8797 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8798 			if (status & STATUS_FD)
   8799 				sc->sc_tctl |=
   8800 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8801 			else
   8802 				sc->sc_tctl |=
   8803 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8804 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   8805 				sc->sc_fcrtl |= FCRTL_XONE;
   8806 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8807 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8808 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8809 				      sc->sc_fcrtl);
   8810 			sc->sc_tbi_linkup = 1;
   8811 		} else {
   8812 			if (i == WM_LINKUP_TIMEOUT)
   8813 				wm_check_for_link(sc);
   8814 			/* Link is down. */
   8815 			DPRINTF(WM_DEBUG_LINK,
   8816 			    ("%s: LINK: set media -> link down\n",
   8817 			    device_xname(sc->sc_dev)));
   8818 			sc->sc_tbi_linkup = 0;
   8819 		}
   8820 	} else {
   8821 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   8822 		    device_xname(sc->sc_dev)));
   8823 		sc->sc_tbi_linkup = 0;
   8824 	}
   8825 
   8826 	wm_tbi_serdes_set_linkled(sc);
   8827 
   8828 	return 0;
   8829 }
   8830 
   8831 /*
   8832  * wm_tbi_mediastatus:	[ifmedia interface function]
   8833  *
   8834  *	Get the current interface media status on a 1000BASE-X device.
   8835  */
   8836 static void
   8837 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8838 {
   8839 	struct wm_softc *sc = ifp->if_softc;
   8840 	uint32_t ctrl, status;
   8841 
   8842 	ifmr->ifm_status = IFM_AVALID;
   8843 	ifmr->ifm_active = IFM_ETHER;
   8844 
   8845 	status = CSR_READ(sc, WMREG_STATUS);
   8846 	if ((status & STATUS_LU) == 0) {
   8847 		ifmr->ifm_active |= IFM_NONE;
   8848 		return;
   8849 	}
   8850 
   8851 	ifmr->ifm_status |= IFM_ACTIVE;
   8852 	/* Only 82545 is LX */
   8853 	if (sc->sc_type == WM_T_82545)
   8854 		ifmr->ifm_active |= IFM_1000_LX;
   8855 	else
   8856 		ifmr->ifm_active |= IFM_1000_SX;
   8857 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   8858 		ifmr->ifm_active |= IFM_FDX;
   8859 	else
   8860 		ifmr->ifm_active |= IFM_HDX;
   8861 	ctrl = CSR_READ(sc, WMREG_CTRL);
   8862 	if (ctrl & CTRL_RFCE)
   8863 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   8864 	if (ctrl & CTRL_TFCE)
   8865 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   8866 }
   8867 
   8868 /* XXX TBI only */
   8869 static int
   8870 wm_check_for_link(struct wm_softc *sc)
   8871 {
   8872 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8873 	uint32_t rxcw;
   8874 	uint32_t ctrl;
   8875 	uint32_t status;
   8876 	uint32_t sig;
   8877 
   8878 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   8879 		/* XXX need some work for >= 82571 */
   8880 		if (sc->sc_type >= WM_T_82571) {
   8881 			sc->sc_tbi_linkup = 1;
   8882 			return 0;
   8883 		}
   8884 	}
   8885 
   8886 	rxcw = CSR_READ(sc, WMREG_RXCW);
   8887 	ctrl = CSR_READ(sc, WMREG_CTRL);
   8888 	status = CSR_READ(sc, WMREG_STATUS);
   8889 
   8890 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   8891 
   8892 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   8893 		device_xname(sc->sc_dev), __func__,
   8894 		((ctrl & CTRL_SWDPIN(1)) == sig),
   8895 		((status & STATUS_LU) != 0),
   8896 		((rxcw & RXCW_C) != 0)
   8897 		    ));
   8898 
   8899 	/*
   8900 	 * SWDPIN   LU RXCW
   8901 	 *      0    0    0
   8902 	 *      0    0    1	(should not happen)
   8903 	 *      0    1    0	(should not happen)
   8904 	 *      0    1    1	(should not happen)
    8905 	 *      1    0    0	Disable autonegotiation and force link up
    8906 	 *      1    0    1	got /C/ but no link yet
    8907 	 *      1    1    0	(link up)
    8908 	 *      1    1    1	If IFM_AUTO, go back to autonegotiation
   8909 	 *
   8910 	 */
   8911 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   8912 	    && ((status & STATUS_LU) == 0)
   8913 	    && ((rxcw & RXCW_C) == 0)) {
   8914 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   8915 			__func__));
   8916 		sc->sc_tbi_linkup = 0;
   8917 		/* Disable auto-negotiation in the TXCW register */
   8918 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   8919 
   8920 		/*
   8921 		 * Force link-up and also force full-duplex.
   8922 		 *
    8923 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    8924 		 * automatically, so we update sc->sc_ctrl from it.
   8925 		 */
   8926 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   8927 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8928 	} else if (((status & STATUS_LU) != 0)
   8929 	    && ((rxcw & RXCW_C) != 0)
   8930 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   8931 		sc->sc_tbi_linkup = 1;
   8932 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   8933 			__func__));
   8934 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   8935 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   8936 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   8937 	    && ((rxcw & RXCW_C) != 0)) {
   8938 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   8939 	} else {
   8940 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   8941 			status));
   8942 	}
   8943 
   8944 	return 0;
   8945 }
   8946 
   8947 /*
   8948  * wm_tbi_tick:
   8949  *
   8950  *	Check the link on TBI devices.
   8951  *	This function acts as mii_tick().
   8952  */
   8953 static void
   8954 wm_tbi_tick(struct wm_softc *sc)
   8955 {
   8956 	struct wm_txqueue *txq = sc->sc_txq;
   8957 	struct mii_data *mii = &sc->sc_mii;
   8958 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   8959 	uint32_t status;
   8960 
   8961 	KASSERT(WM_TX_LOCKED(txq));
   8962 
   8963 	status = CSR_READ(sc, WMREG_STATUS);
   8964 
   8965 	/* XXX is this needed? */
   8966 	(void)CSR_READ(sc, WMREG_RXCW);
   8967 	(void)CSR_READ(sc, WMREG_CTRL);
   8968 
   8969 	/* set link status */
   8970 	if ((status & STATUS_LU) == 0) {
   8971 		DPRINTF(WM_DEBUG_LINK,
   8972 		    ("%s: LINK: checklink -> down\n",
   8973 			device_xname(sc->sc_dev)));
   8974 		sc->sc_tbi_linkup = 0;
   8975 	} else if (sc->sc_tbi_linkup == 0) {
   8976 		DPRINTF(WM_DEBUG_LINK,
   8977 		    ("%s: LINK: checklink -> up %s\n",
   8978 			device_xname(sc->sc_dev),
   8979 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8980 		sc->sc_tbi_linkup = 1;
   8981 		sc->sc_tbi_serdes_ticks = 0;
   8982 	}
   8983 
   8984 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   8985 		goto setled;
   8986 
   8987 	if ((status & STATUS_LU) == 0) {
   8988 		sc->sc_tbi_linkup = 0;
   8989 		/* If the timer expired, retry autonegotiation */
   8990 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8991 		    && (++sc->sc_tbi_serdes_ticks
   8992 			>= sc->sc_tbi_serdes_anegticks)) {
   8993 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   8994 			sc->sc_tbi_serdes_ticks = 0;
   8995 			/*
   8996 			 * Reset the link, and let autonegotiation do
   8997 			 * its thing
   8998 			 */
   8999 			sc->sc_ctrl |= CTRL_LRST;
   9000 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9001 			CSR_WRITE_FLUSH(sc);
   9002 			delay(1000);
   9003 			sc->sc_ctrl &= ~CTRL_LRST;
   9004 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9005 			CSR_WRITE_FLUSH(sc);
   9006 			delay(1000);
   9007 			CSR_WRITE(sc, WMREG_TXCW,
   9008 			    sc->sc_txcw & ~TXCW_ANE);
   9009 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9010 		}
   9011 	}
   9012 
   9013 setled:
   9014 	wm_tbi_serdes_set_linkled(sc);
   9015 }
   9016 
   9017 /* SERDES related */
   9018 static void
   9019 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9020 {
   9021 	uint32_t reg;
   9022 
   9023 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9024 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9025 		return;
   9026 
   9027 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9028 	reg |= PCS_CFG_PCS_EN;
   9029 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9030 
   9031 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9032 	reg &= ~CTRL_EXT_SWDPIN(3);
   9033 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9034 	CSR_WRITE_FLUSH(sc);
   9035 }
   9036 
   9037 static int
   9038 wm_serdes_mediachange(struct ifnet *ifp)
   9039 {
   9040 	struct wm_softc *sc = ifp->if_softc;
   9041 	bool pcs_autoneg = true; /* XXX */
   9042 	uint32_t ctrl_ext, pcs_lctl, reg;
   9043 
   9044 	/* XXX Currently, this function is not called on 8257[12] */
   9045 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9046 	    || (sc->sc_type >= WM_T_82575))
   9047 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9048 
   9049 	wm_serdes_power_up_link_82575(sc);
   9050 
   9051 	sc->sc_ctrl |= CTRL_SLU;
   9052 
   9053 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9054 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9055 
   9056 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9057 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9058 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9059 	case CTRL_EXT_LINK_MODE_SGMII:
   9060 		pcs_autoneg = true;
   9061 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9062 		break;
   9063 	case CTRL_EXT_LINK_MODE_1000KX:
   9064 		pcs_autoneg = false;
   9065 		/* FALLTHROUGH */
   9066 	default:
   9067 		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
   9068 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9069 				pcs_autoneg = false;
   9070 		}
   9071 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9072 		    | CTRL_FRCFDX;
   9073 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9074 	}
   9075 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9076 
   9077 	if (pcs_autoneg) {
   9078 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9079 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9080 
   9081 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9082 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9083 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9084 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9085 	} else
   9086 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9087 
   9088 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    9089 
   9091 	return 0;
   9092 }
   9093 
   9094 static void
   9095 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9096 {
   9097 	struct wm_softc *sc = ifp->if_softc;
   9098 	struct mii_data *mii = &sc->sc_mii;
   9099 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9100 	uint32_t pcs_adv, pcs_lpab, reg;
   9101 
   9102 	ifmr->ifm_status = IFM_AVALID;
   9103 	ifmr->ifm_active = IFM_ETHER;
   9104 
   9105 	/* Check PCS */
   9106 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9107 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9108 		ifmr->ifm_active |= IFM_NONE;
   9109 		sc->sc_tbi_linkup = 0;
   9110 		goto setled;
   9111 	}
   9112 
   9113 	sc->sc_tbi_linkup = 1;
   9114 	ifmr->ifm_status |= IFM_ACTIVE;
   9115 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9116 	if ((reg & PCS_LSTS_FDX) != 0)
   9117 		ifmr->ifm_active |= IFM_FDX;
   9118 	else
   9119 		ifmr->ifm_active |= IFM_HDX;
   9120 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9121 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9122 		/* Check flow */
   9123 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9124 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9125 			printf("XXX LINKOK but not ACOMP\n");
   9126 			goto setled;
   9127 		}
   9128 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9129 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
    9130 		printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
   9131 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9132 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9133 			mii->mii_media_active |= IFM_FLOW
   9134 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9135 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9136 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9137 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9138 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9139 			mii->mii_media_active |= IFM_FLOW
   9140 			    | IFM_ETH_TXPAUSE;
   9141 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9142 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9143 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9144 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9145 			mii->mii_media_active |= IFM_FLOW
   9146 			    | IFM_ETH_RXPAUSE;
    9147 		}
   9149 	}
   9150 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9151 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9152 setled:
   9153 	wm_tbi_serdes_set_linkled(sc);
   9154 }
   9155 
   9156 /*
   9157  * wm_serdes_tick:
   9158  *
   9159  *	Check the link on serdes devices.
   9160  */
   9161 static void
   9162 wm_serdes_tick(struct wm_softc *sc)
   9163 {
   9164 	struct wm_txqueue *txq = sc->sc_txq;
   9165 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9166 	struct mii_data *mii = &sc->sc_mii;
   9167 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9168 	uint32_t reg;
   9169 
   9170 	KASSERT(WM_TX_LOCKED(txq));
   9171 
   9172 	mii->mii_media_status = IFM_AVALID;
   9173 	mii->mii_media_active = IFM_ETHER;
   9174 
   9175 	/* Check PCS */
   9176 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9177 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9178 		mii->mii_media_status |= IFM_ACTIVE;
   9179 		sc->sc_tbi_linkup = 1;
   9180 		sc->sc_tbi_serdes_ticks = 0;
   9181 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9182 		if ((reg & PCS_LSTS_FDX) != 0)
   9183 			mii->mii_media_active |= IFM_FDX;
   9184 		else
   9185 			mii->mii_media_active |= IFM_HDX;
   9186 	} else {
    9187 		mii->mii_media_active |= IFM_NONE;
   9188 		sc->sc_tbi_linkup = 0;
    9189 		/* If the timer expired, retry autonegotiation */
   9190 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9191 		    && (++sc->sc_tbi_serdes_ticks
   9192 			>= sc->sc_tbi_serdes_anegticks)) {
   9193 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9194 			sc->sc_tbi_serdes_ticks = 0;
   9195 			/* XXX */
   9196 			wm_serdes_mediachange(ifp);
   9197 		}
   9198 	}
   9199 
   9200 	wm_tbi_serdes_set_linkled(sc);
   9201 }
   9202 
   9203 /* SFP related */
   9204 
   9205 static int
   9206 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9207 {
   9208 	uint32_t i2ccmd;
   9209 	int i;
   9210 
   9211 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9212 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9213 
   9214 	/* Poll the ready bit */
   9215 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9216 		delay(50);
   9217 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9218 		if (i2ccmd & I2CCMD_READY)
   9219 			break;
   9220 	}
   9221 	if ((i2ccmd & I2CCMD_READY) == 0)
   9222 		return -1;
   9223 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9224 		return -1;
   9225 
   9226 	*data = i2ccmd & 0x00ff;
   9227 
   9228 	return 0;
   9229 }
   9230 
   9231 static uint32_t
   9232 wm_sfp_get_media_type(struct wm_softc *sc)
   9233 {
   9234 	uint32_t ctrl_ext;
   9235 	uint8_t val = 0;
   9236 	int timeout = 3;
   9237 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9238 	int rv = -1;
   9239 
   9240 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9241 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9242 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9243 	CSR_WRITE_FLUSH(sc);
   9244 
   9245 	/* Read SFP module data */
   9246 	while (timeout) {
   9247 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9248 		if (rv == 0)
   9249 			break;
   9250 		delay(100*1000); /* XXX too big */
   9251 		timeout--;
   9252 	}
   9253 	if (rv != 0)
   9254 		goto out;
   9255 	switch (val) {
   9256 	case SFF_SFP_ID_SFF:
   9257 		aprint_normal_dev(sc->sc_dev,
   9258 		    "Module/Connector soldered to board\n");
   9259 		break;
   9260 	case SFF_SFP_ID_SFP:
   9261 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9262 		break;
   9263 	case SFF_SFP_ID_UNKNOWN:
   9264 		goto out;
   9265 	default:
   9266 		break;
   9267 	}
   9268 
   9269 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9270 	if (rv != 0) {
   9271 		goto out;
   9272 	}
   9273 
   9274 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9275 		mediatype = WM_MEDIATYPE_SERDES;
   9276 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   9277 		sc->sc_flags |= WM_F_SGMII;
   9278 		mediatype = WM_MEDIATYPE_COPPER;
   9279 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   9280 		sc->sc_flags |= WM_F_SGMII;
   9281 		mediatype = WM_MEDIATYPE_SERDES;
   9282 	}
   9283 
   9284 out:
   9285 	/* Restore I2C interface setting */
   9286 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9287 
   9288 	return mediatype;
   9289 }
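
         /*
          * Mapping used above: 1000BASE-SX/LX modules are treated as SERDES
          * media, 1000BASE-T modules as copper over SGMII, and 100BASE-FX
          * modules as SERDES over SGMII.  Anything else leaves the media
          * type as WM_MEDIATYPE_UNKNOWN.
          */
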
   9290 /*
   9291  * NVM related.
   9292  * Microwire, SPI (w/wo EERD) and Flash.
   9293  */
   9294 
   9295 /* Both spi and uwire */
   9296 
   9297 /*
   9298  * wm_eeprom_sendbits:
   9299  *
   9300  *	Send a series of bits to the EEPROM.
   9301  */
   9302 static void
   9303 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9304 {
   9305 	uint32_t reg;
   9306 	int x;
   9307 
   9308 	reg = CSR_READ(sc, WMREG_EECD);
   9309 
   9310 	for (x = nbits; x > 0; x--) {
   9311 		if (bits & (1U << (x - 1)))
   9312 			reg |= EECD_DI;
   9313 		else
   9314 			reg &= ~EECD_DI;
   9315 		CSR_WRITE(sc, WMREG_EECD, reg);
   9316 		CSR_WRITE_FLUSH(sc);
   9317 		delay(2);
   9318 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9319 		CSR_WRITE_FLUSH(sc);
   9320 		delay(2);
   9321 		CSR_WRITE(sc, WMREG_EECD, reg);
   9322 		CSR_WRITE_FLUSH(sc);
   9323 		delay(2);
   9324 	}
   9325 }
   9326 
   9327 /*
   9328  * wm_eeprom_recvbits:
   9329  *
   9330  *	Receive a series of bits from the EEPROM.
   9331  */
   9332 static void
   9333 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9334 {
   9335 	uint32_t reg, val;
   9336 	int x;
   9337 
   9338 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9339 
   9340 	val = 0;
   9341 	for (x = nbits; x > 0; x--) {
   9342 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9343 		CSR_WRITE_FLUSH(sc);
   9344 		delay(2);
   9345 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9346 			val |= (1U << (x - 1));
   9347 		CSR_WRITE(sc, WMREG_EECD, reg);
   9348 		CSR_WRITE_FLUSH(sc);
   9349 		delay(2);
   9350 	}
   9351 	*valp = val;
   9352 }
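
         /*
          * The two helpers above bit-bang the EECD interface: each bit is
          * driven onto (or sampled from) the data line around a software
          * generated clock pulse on EECD_SK, with ~2us settling delays.
          * wm_nvm_read_uwire() below composes a Microwire READ from them:
          * raise CS, shift out the 3-bit READ opcode and the word address,
          * then shift in 16 data bits.
          */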
   9353 
   9354 /* Microwire */
   9355 
   9356 /*
   9357  * wm_nvm_read_uwire:
   9358  *
   9359  *	Read a word from the EEPROM using the MicroWire protocol.
   9360  */
   9361 static int
   9362 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9363 {
   9364 	uint32_t reg, val;
   9365 	int i;
   9366 
   9367 	for (i = 0; i < wordcnt; i++) {
   9368 		/* Clear SK and DI. */
   9369 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9370 		CSR_WRITE(sc, WMREG_EECD, reg);
   9371 
   9372 		/*
   9373 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9374 		 * and Xen.
   9375 		 *
    9376 		 * We use this workaround only for the 82540 because qemu's
    9377 		 * e1000 acts as an 82540.
   9378 		 */
   9379 		if (sc->sc_type == WM_T_82540) {
   9380 			reg |= EECD_SK;
   9381 			CSR_WRITE(sc, WMREG_EECD, reg);
   9382 			reg &= ~EECD_SK;
   9383 			CSR_WRITE(sc, WMREG_EECD, reg);
   9384 			CSR_WRITE_FLUSH(sc);
   9385 			delay(2);
   9386 		}
   9387 		/* XXX: end of workaround */
   9388 
   9389 		/* Set CHIP SELECT. */
   9390 		reg |= EECD_CS;
   9391 		CSR_WRITE(sc, WMREG_EECD, reg);
   9392 		CSR_WRITE_FLUSH(sc);
   9393 		delay(2);
   9394 
   9395 		/* Shift in the READ command. */
   9396 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9397 
   9398 		/* Shift in address. */
   9399 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9400 
   9401 		/* Shift out the data. */
   9402 		wm_eeprom_recvbits(sc, &val, 16);
   9403 		data[i] = val & 0xffff;
   9404 
   9405 		/* Clear CHIP SELECT. */
   9406 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9407 		CSR_WRITE(sc, WMREG_EECD, reg);
   9408 		CSR_WRITE_FLUSH(sc);
   9409 		delay(2);
   9410 	}
   9411 
   9412 	return 0;
   9413 }
   9414 
   9415 /* SPI */
   9416 
   9417 /*
   9418  * Set SPI and FLASH related information from the EECD register.
   9419  * For 82541 and 82547, the word size is taken from EEPROM.
   9420  */
   9421 static int
   9422 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9423 {
   9424 	int size;
   9425 	uint32_t reg;
   9426 	uint16_t data;
   9427 
   9428 	reg = CSR_READ(sc, WMREG_EECD);
   9429 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9430 
   9431 	/* Read the size of NVM from EECD by default */
   9432 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9433 	switch (sc->sc_type) {
   9434 	case WM_T_82541:
   9435 	case WM_T_82541_2:
   9436 	case WM_T_82547:
   9437 	case WM_T_82547_2:
   9438 		/* Set dummy value to access EEPROM */
   9439 		sc->sc_nvm_wordsize = 64;
   9440 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9441 		reg = data;
   9442 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9443 		if (size == 0)
   9444 			size = 6; /* 64 word size */
   9445 		else
   9446 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9447 		break;
   9448 	case WM_T_80003:
   9449 	case WM_T_82571:
   9450 	case WM_T_82572:
   9451 	case WM_T_82573: /* SPI case */
   9452 	case WM_T_82574: /* SPI case */
   9453 	case WM_T_82583: /* SPI case */
   9454 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9455 		if (size > 14)
   9456 			size = 14;
   9457 		break;
   9458 	case WM_T_82575:
   9459 	case WM_T_82576:
   9460 	case WM_T_82580:
   9461 	case WM_T_I350:
   9462 	case WM_T_I354:
   9463 	case WM_T_I210:
   9464 	case WM_T_I211:
   9465 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9466 		if (size > 15)
   9467 			size = 15;
   9468 		break;
   9469 	default:
   9470 		aprint_error_dev(sc->sc_dev,
   9471 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    9472 		return -1;
   9474 	}
   9475 
   9476 	sc->sc_nvm_wordsize = 1 << size;
   9477 
   9478 	return 0;
   9479 }
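
         /*
          * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6): an EECD
          * size field of 8 gives size = 8 + 6 = 14, so sc_nvm_wordsize =
          * 1 << 14 = 16384 16-bit words (32KB), which is also the cap
          * applied to the 80003/8257x SPI devices above.
          */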
   9480 
   9481 /*
   9482  * wm_nvm_ready_spi:
   9483  *
   9484  *	Wait for a SPI EEPROM to be ready for commands.
   9485  */
   9486 static int
   9487 wm_nvm_ready_spi(struct wm_softc *sc)
   9488 {
   9489 	uint32_t val;
   9490 	int usec;
   9491 
   9492 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   9493 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   9494 		wm_eeprom_recvbits(sc, &val, 8);
   9495 		if ((val & SPI_SR_RDY) == 0)
   9496 			break;
   9497 	}
   9498 	if (usec >= SPI_MAX_RETRIES) {
   9499 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   9500 		return 1;
   9501 	}
   9502 	return 0;
   9503 }
   9504 
   9505 /*
   9506  * wm_nvm_read_spi:
   9507  *
    9508  *	Read a word from the EEPROM using the SPI protocol.
   9509  */
   9510 static int
   9511 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9512 {
   9513 	uint32_t reg, val;
   9514 	int i;
   9515 	uint8_t opc;
   9516 
   9517 	/* Clear SK and CS. */
   9518 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   9519 	CSR_WRITE(sc, WMREG_EECD, reg);
   9520 	CSR_WRITE_FLUSH(sc);
   9521 	delay(2);
   9522 
   9523 	if (wm_nvm_ready_spi(sc))
   9524 		return 1;
   9525 
   9526 	/* Toggle CS to flush commands. */
   9527 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   9528 	CSR_WRITE_FLUSH(sc);
   9529 	delay(2);
   9530 	CSR_WRITE(sc, WMREG_EECD, reg);
   9531 	CSR_WRITE_FLUSH(sc);
   9532 	delay(2);
   9533 
   9534 	opc = SPI_OPC_READ;
   9535 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   9536 		opc |= SPI_OPC_A8;
   9537 
   9538 	wm_eeprom_sendbits(sc, opc, 8);
   9539 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   9540 
   9541 	for (i = 0; i < wordcnt; i++) {
   9542 		wm_eeprom_recvbits(sc, &val, 16);
   9543 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   9544 	}
   9545 
   9546 	/* Raise CS and clear SK. */
   9547 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   9548 	CSR_WRITE(sc, WMREG_EECD, reg);
   9549 	CSR_WRITE_FLUSH(sc);
   9550 	delay(2);
   9551 
   9552 	return 0;
   9553 }
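
         /*
          * Note on the addressing above: SPI EEPROMs are byte addressed, so
          * the word index is shifted left by one before being sent.  With
          * 8-bit addressing, words at byte addresses 256 and up are reached
          * by setting SPI_OPC_A8 in the opcode rather than by sending a
          * ninth address bit.
          */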
   9554 
   9555 /* Using with EERD */
   9556 
   9557 static int
   9558 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   9559 {
   9560 	uint32_t attempts = 100000;
   9561 	uint32_t i, reg = 0;
   9562 	int32_t done = -1;
   9563 
   9564 	for (i = 0; i < attempts; i++) {
   9565 		reg = CSR_READ(sc, rw);
   9566 
   9567 		if (reg & EERD_DONE) {
   9568 			done = 0;
   9569 			break;
   9570 		}
   9571 		delay(5);
   9572 	}
   9573 
   9574 	return done;
   9575 }
   9576 
   9577 static int
   9578 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   9579     uint16_t *data)
   9580 {
   9581 	int i, eerd = 0;
   9582 	int error = 0;
   9583 
   9584 	for (i = 0; i < wordcnt; i++) {
   9585 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   9586 
   9587 		CSR_WRITE(sc, WMREG_EERD, eerd);
   9588 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   9589 		if (error != 0)
   9590 			break;
   9591 
   9592 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   9593 	}
   9594 
   9595 	return error;
   9596 }
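
         /*
          * The EERD path is the register-based alternative to the bit-banged
          * reads above: write the word address together with EERD_START,
          * poll for EERD_DONE (wm_poll_eerd_eewr_done() above), then take
          * the data from the upper bits of the same register.
          */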
   9597 
   9598 /* Flash */
   9599 
   9600 static int
   9601 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   9602 {
   9603 	uint32_t eecd;
   9604 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   9605 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   9606 	uint8_t sig_byte = 0;
   9607 
   9608 	switch (sc->sc_type) {
   9609 	case WM_T_ICH8:
   9610 	case WM_T_ICH9:
   9611 		eecd = CSR_READ(sc, WMREG_EECD);
   9612 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   9613 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   9614 			return 0;
   9615 		}
   9616 		/* FALLTHROUGH */
   9617 	default:
   9618 		/* Default to 0 */
   9619 		*bank = 0;
   9620 
   9621 		/* Check bank 0 */
   9622 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   9623 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   9624 			*bank = 0;
   9625 			return 0;
   9626 		}
   9627 
   9628 		/* Check bank 1 */
   9629 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   9630 		    &sig_byte);
   9631 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   9632 			*bank = 1;
   9633 			return 0;
   9634 		}
   9635 	}
   9636 
   9637 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   9638 		device_xname(sc->sc_dev)));
   9639 	return -1;
   9640 }
   9641 
   9642 /******************************************************************************
   9643  * This function does initial flash setup so that a new read/write/erase cycle
   9644  * can be started.
   9645  *
   9646  * sc - The pointer to the hw structure
   9647  ****************************************************************************/
   9648 static int32_t
   9649 wm_ich8_cycle_init(struct wm_softc *sc)
   9650 {
   9651 	uint16_t hsfsts;
   9652 	int32_t error = 1;
   9653 	int32_t i     = 0;
   9654 
   9655 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   9656 
    9657 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   9658 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   9659 		return error;
   9660 	}
   9661 
    9662 	/* Clear FCERR and DAEL in HW status by writing 1s */
   9664 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   9665 
   9666 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   9667 
    9668 	/*
    9669 	 * Either we should have a hardware SPI cycle-in-progress bit to check
    9670 	 * against in order to start a new cycle, or the FDONE bit should be
    9671 	 * changed in the hardware so that it is 1 after a hardware reset,
    9672 	 * which can then be used to tell whether a cycle is in progress or
    9673 	 * has been completed.  We should also have some software semaphore
    9674 	 * mechanism to guard FDONE or the cycle-in-progress bit so that
    9675 	 * accesses to those bits from two threads are serialized, or some
    9676 	 * way to keep two threads from starting a cycle at the same time.
    9677 	 */
   9678 
   9679 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   9680 		/*
   9681 		 * There is no cycle running at present, so we can start a
   9682 		 * cycle
   9683 		 */
   9684 
   9685 		/* Begin by setting Flash Cycle Done. */
   9686 		hsfsts |= HSFSTS_DONE;
   9687 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   9688 		error = 0;
   9689 	} else {
   9690 		/*
    9691 		 * Otherwise, poll for some time so the current cycle has a
   9692 		 * chance to end before giving up.
   9693 		 */
   9694 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   9695 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   9696 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   9697 				error = 0;
   9698 				break;
   9699 			}
   9700 			delay(1);
   9701 		}
   9702 		if (error == 0) {
   9703 			/*
    9704 			 * We successfully waited for the previous cycle to
    9705 			 * finish; now set the Flash Cycle Done bit.
   9706 			 */
   9707 			hsfsts |= HSFSTS_DONE;
   9708 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   9709 		}
   9710 	}
   9711 	return error;
   9712 }
   9713 
   9714 /******************************************************************************
   9715  * This function starts a flash cycle and waits for its completion
   9716  *
   9717  * sc - The pointer to the hw structure
   9718  ****************************************************************************/
   9719 static int32_t
   9720 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   9721 {
   9722 	uint16_t hsflctl;
   9723 	uint16_t hsfsts;
   9724 	int32_t error = 1;
   9725 	uint32_t i = 0;
   9726 
   9727 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   9728 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   9729 	hsflctl |= HSFCTL_GO;
   9730 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   9731 
   9732 	/* Wait till FDONE bit is set to 1 */
   9733 	do {
   9734 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   9735 		if (hsfsts & HSFSTS_DONE)
   9736 			break;
   9737 		delay(1);
   9738 		i++;
   9739 	} while (i < timeout);
    9740 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   9741 		error = 0;
   9742 
   9743 	return error;
   9744 }
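
         /*
          * Together, the two helpers above implement the ICH flash handshake
          * used by wm_read_ich8_data() below: wm_ich8_cycle_init() clears
          * stale error/done bits in HSFSTS and makes sure no cycle is in
          * flight, then wm_ich8_flash_cycle() starts the programmed cycle
          * with HSFCTL_GO and polls HSFSTS_DONE until it sets or the
          * timeout expires.
          */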
   9745 
   9746 /******************************************************************************
   9747  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   9748  *
   9749  * sc - The pointer to the hw structure
   9750  * index - The index of the byte or word to read.
   9751  * size - Size of data to read, 1=byte 2=word
   9752  * data - Pointer to the word to store the value read.
   9753  *****************************************************************************/
   9754 static int32_t
   9755 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   9756     uint32_t size, uint16_t *data)
   9757 {
   9758 	uint16_t hsfsts;
   9759 	uint16_t hsflctl;
   9760 	uint32_t flash_linear_address;
   9761 	uint32_t flash_data = 0;
   9762 	int32_t error = 1;
   9763 	int32_t count = 0;
   9764 
    9765 	if (size < 1 || size > 2 || data == NULL ||
   9766 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   9767 		return error;
   9768 
   9769 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   9770 	    sc->sc_ich8_flash_base;
   9771 
   9772 	do {
   9773 		delay(1);
   9774 		/* Steps */
   9775 		error = wm_ich8_cycle_init(sc);
   9776 		if (error)
   9777 			break;
   9778 
   9779 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   9780 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   9781 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   9782 		    & HSFCTL_BCOUNT_MASK;
   9783 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   9784 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   9785 
   9786 		/*
   9787 		 * Write the last 24 bits of index into Flash Linear address
   9788 		 * field in Flash Address
   9789 		 */
   9790 		/* TODO: TBD maybe check the index against the size of flash */
   9791 
   9792 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   9793 
   9794 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   9795 
    9796 		/*
    9797 		 * If FCERR is set, clear it and retry the whole sequence
    9798 		 * a few more times; otherwise read the data in from Flash
    9799 		 * Data0, least significant byte first.
    9800 		 */
   9802 		if (error == 0) {
   9803 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   9804 			if (size == 1)
   9805 				*data = (uint8_t)(flash_data & 0x000000FF);
   9806 			else if (size == 2)
   9807 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   9808 			break;
   9809 		} else {
   9810 			/*
   9811 			 * If we've gotten here, then things are probably
   9812 			 * completely hosed, but if the error condition is
   9813 			 * detected, it won't hurt to give it another try...
   9814 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   9815 			 */
   9816 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   9817 			if (hsfsts & HSFSTS_ERR) {
   9818 				/* Repeat for some time before giving up. */
   9819 				continue;
   9820 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   9821 				break;
   9822 		}
   9823 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   9824 
   9825 	return error;
   9826 }
   9827 
   9828 /******************************************************************************
   9829  * Reads a single byte from the NVM using the ICH8 flash access registers.
   9830  *
   9831  * sc - pointer to wm_hw structure
   9832  * index - The index of the byte to read.
   9833  * data - Pointer to a byte to store the value read.
   9834  *****************************************************************************/
   9835 static int32_t
   9836 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   9837 {
   9838 	int32_t status;
   9839 	uint16_t word = 0;
   9840 
   9841 	status = wm_read_ich8_data(sc, index, 1, &word);
   9842 	if (status == 0)
   9843 		*data = (uint8_t)word;
   9844 	else
   9845 		*data = 0;
   9846 
   9847 	return status;
   9848 }
   9849 
   9850 /******************************************************************************
   9851  * Reads a word from the NVM using the ICH8 flash access registers.
   9852  *
   9853  * sc - pointer to wm_hw structure
   9854  * index - The starting byte index of the word to read.
   9855  * data - Pointer to a word to store the value read.
   9856  *****************************************************************************/
   9857 static int32_t
   9858 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   9859 {
   9860 	int32_t status;
   9861 
   9862 	status = wm_read_ich8_data(sc, index, 2, data);
   9863 	return status;
   9864 }
   9865 
   9866 /******************************************************************************
   9867  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   9868  * register.
   9869  *
   9870  * sc - Struct containing variables accessed by shared code
   9871  * offset - offset of word in the EEPROM to read
    9872  * words - number of words to read
    9873  * data - words read from the EEPROM
   9874  *****************************************************************************/
   9875 static int
   9876 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   9877 {
   9878 	int32_t  error = 0;
   9879 	uint32_t flash_bank = 0;
   9880 	uint32_t act_offset = 0;
   9881 	uint32_t bank_offset = 0;
   9882 	uint16_t word = 0;
   9883 	uint16_t i = 0;
   9884 
   9885 	/*
   9886 	 * We need to know which is the valid flash bank.  In the event
   9887 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   9888 	 * managing flash_bank.  So it cannot be trusted and needs
   9889 	 * to be updated with each read.
   9890 	 */
   9891 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   9892 	if (error) {
   9893 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   9894 			device_xname(sc->sc_dev)));
   9895 		flash_bank = 0;
   9896 	}
   9897 
    9898 	/*
    9899 	 * Adjust the offset if we're on bank 1: sc_ich8_flash_bank_size
    9900 	 * is in 16-bit words, so multiply by 2 for a byte offset.
    9901 	 */
   9902 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   9903 
   9904 	error = wm_get_swfwhw_semaphore(sc);
   9905 	if (error) {
   9906 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9907 		    __func__);
   9908 		return error;
   9909 	}
   9910 
   9911 	for (i = 0; i < words; i++) {
   9912 		/* The NVM part needs a byte offset, hence * 2 */
   9913 		act_offset = bank_offset + ((offset + i) * 2);
   9914 		error = wm_read_ich8_word(sc, act_offset, &word);
   9915 		if (error) {
   9916 			aprint_error_dev(sc->sc_dev,
   9917 			    "%s: failed to read NVM\n", __func__);
   9918 			break;
   9919 		}
   9920 		data[i] = word;
   9921 	}
   9922 
   9923 	wm_put_swfwhw_semaphore(sc);
   9924 	return error;
   9925 }
   9926 
   9927 /* iNVM */
   9928 
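         /*
          * wm_nvm_read_word_invm:
          *
          *	Walk the iNVM (i210/i211 integrated NVM) autoload records
          *	looking for a word autoload entry whose address matches,
          *	skipping over CSR autoload and RSA key records.
          */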
   9929 static int
   9930 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   9931 {
    9932 	int32_t  rv = -1;	/* assume "not found" */
   9933 	uint32_t invm_dword;
   9934 	uint16_t i;
   9935 	uint8_t record_type, word_address;
   9936 
   9937 	for (i = 0; i < INVM_SIZE; i++) {
   9938 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   9939 		/* Get record type */
   9940 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   9941 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   9942 			break;
   9943 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   9944 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   9945 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   9946 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   9947 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   9948 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   9949 			if (word_address == address) {
   9950 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   9951 				rv = 0;
   9952 				break;
   9953 			}
   9954 		}
   9955 	}
   9956 
   9957 	return rv;
   9958 }
   9959 
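         /*
          * wm_nvm_read_invm:
          *
          *	Read words from the iNVM.  Words missing from the image are
          *	substituted with i211 default values where such defaults
          *	exist, otherwise with NVM_RESERVED_WORD; a missing MAC
          *	address word is an error.
          */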
   9960 static int
   9961 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   9962 {
   9963 	int rv = 0;
   9964 	int i;
   9965 
   9966 	for (i = 0; i < words; i++) {
   9967 		switch (offset + i) {
   9968 		case NVM_OFF_MACADDR:
   9969 		case NVM_OFF_MACADDR1:
   9970 		case NVM_OFF_MACADDR2:
   9971 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   9972 			if (rv != 0) {
   9973 				data[i] = 0xffff;
   9974 				rv = -1;
   9975 			}
   9976 			break;
    9977 		case NVM_OFF_CFG2:
    9978 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    9979 			if (rv != 0) {
    9980 				data[i] = NVM_INIT_CTRL_2_DEFAULT_I211;
    9981 				rv = 0;
    9982 			}
    9983 			break;
    9984 		case NVM_OFF_CFG4:
    9985 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    9986 			if (rv != 0) {
    9987 				data[i] = NVM_INIT_CTRL_4_DEFAULT_I211;
    9988 				rv = 0;
    9989 			}
    9990 			break;
    9991 		case NVM_OFF_LED_1_CFG:
    9992 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    9993 			if (rv != 0) {
    9994 				data[i] = NVM_LED_1_CFG_DEFAULT_I211;
    9995 				rv = 0;
    9996 			}
    9997 			break;
    9998 		case NVM_OFF_LED_0_2_CFG:
    9999 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    10000 			if (rv != 0) {
    10001 				data[i] = NVM_LED_0_2_CFG_DEFAULT_I211;
    10002 				rv = 0;
    10003 			}
    10004 			break;
    10005 		case NVM_OFF_ID_LED_SETTINGS:
    10006 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    10007 			if (rv != 0) {
    10008 				data[i] = ID_LED_RESERVED_FFFF;
    10009 				rv = 0;
    10010 			}
    10011 			break;
    10012 		default:
    10013 			DPRINTF(WM_DEBUG_NVM,
    10014 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
    10015 			data[i] = NVM_RESERVED_WORD;
    10016 			break;
   10017 		}
   10018 	}
   10019 
   10020 	return rv;
   10021 }
   10022 
    10023 /* Locking, NVM type detection, checksum/version validation and reading */
   10024 
   10025 /*
   10026  * wm_nvm_acquire:
   10027  *
   10028  *	Perform the EEPROM handshake required on some chips.
   10029  */
   10030 static int
   10031 wm_nvm_acquire(struct wm_softc *sc)
   10032 {
   10033 	uint32_t reg;
   10034 	int x;
   10035 	int ret = 0;
   10036 
   10037 	/* always success */
   10038 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10039 		return 0;
   10040 
   10041 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10042 		ret = wm_get_swfwhw_semaphore(sc);
   10043 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10044 		/* This will also do wm_get_swsm_semaphore() if needed */
   10045 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10046 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10047 		ret = wm_get_swsm_semaphore(sc);
   10048 	}
   10049 
   10050 	if (ret) {
   10051 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10052 			__func__);
   10053 		return 1;
   10054 	}
   10055 
   10056 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10057 		reg = CSR_READ(sc, WMREG_EECD);
   10058 
   10059 		/* Request EEPROM access. */
   10060 		reg |= EECD_EE_REQ;
   10061 		CSR_WRITE(sc, WMREG_EECD, reg);
   10062 
   10063 		/* ..and wait for it to be granted. */
   10064 		for (x = 0; x < 1000; x++) {
   10065 			reg = CSR_READ(sc, WMREG_EECD);
   10066 			if (reg & EECD_EE_GNT)
   10067 				break;
   10068 			delay(5);
   10069 		}
   10070 		if ((reg & EECD_EE_GNT) == 0) {
   10071 			aprint_error_dev(sc->sc_dev,
   10072 			    "could not acquire EEPROM GNT\n");
   10073 			reg &= ~EECD_EE_REQ;
   10074 			CSR_WRITE(sc, WMREG_EECD, reg);
   10075 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10076 				wm_put_swfwhw_semaphore(sc);
   10077 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10078 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10079 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10080 				wm_put_swsm_semaphore(sc);
   10081 			return 1;
   10082 		}
   10083 	}
   10084 
   10085 	return 0;
   10086 }
   10087 
   10088 /*
   10089  * wm_nvm_release:
   10090  *
   10091  *	Release the EEPROM mutex.
   10092  */
   10093 static void
   10094 wm_nvm_release(struct wm_softc *sc)
   10095 {
   10096 	uint32_t reg;
   10097 
   10098 	/* always success */
   10099 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10100 		return;
   10101 
   10102 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10103 		reg = CSR_READ(sc, WMREG_EECD);
   10104 		reg &= ~EECD_EE_REQ;
   10105 		CSR_WRITE(sc, WMREG_EECD, reg);
   10106 	}
   10107 
   10108 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10109 		wm_put_swfwhw_semaphore(sc);
   10110 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10111 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10112 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10113 		wm_put_swsm_semaphore(sc);
   10114 }
   10115 
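          /*
           * wm_nvm_is_onboard_eeprom:
           *
           *	Return 0 if the 82573/82574/82583 NVM is a flash part (EECD
           *	bits 15 and 16 both set), otherwise 1 for an onboard EEPROM.
           */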
   10116 static int
   10117 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10118 {
   10119 	uint32_t eecd = 0;
   10120 
   10121 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10122 	    || sc->sc_type == WM_T_82583) {
   10123 		eecd = CSR_READ(sc, WMREG_EECD);
   10124 
   10125 		/* Isolate bits 15 & 16 */
   10126 		eecd = ((eecd >> 15) & 0x03);
   10127 
   10128 		/* If both bits are set, device is Flash type */
   10129 		if (eecd == 0x03)
   10130 			return 0;
   10131 	}
   10132 	return 1;
   10133 }
   10134 
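          /*
           * wm_nvm_get_flash_presence_i210:
           *
           *	Return 1 if the i210/i211 detected an external flash part
           *	(EEC_FLASH_DETECTED), 0 if it must fall back to the iNVM.
           */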
   10135 static int
   10136 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10137 {
   10138 	uint32_t eec;
   10139 
   10140 	eec = CSR_READ(sc, WMREG_EEC);
   10141 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10142 		return 1;
   10143 
   10144 	return 0;
   10145 }
   10146 
   10147 /*
   10148  * wm_nvm_validate_checksum
   10149  *
    10150  * The checksum is defined so that the sum of the first 64 (16 bit)
           * words equals NVM_CHECKSUM (0xBABA); e.g. if the first 63 words
           * sum to 0x1234, the checksum word must hold 0xBABA - 0x1234 = 0xA886.
   10151  */
   10152 static int
   10153 wm_nvm_validate_checksum(struct wm_softc *sc)
   10154 {
   10155 	uint16_t checksum;
   10156 	uint16_t eeprom_data;
   10157 #ifdef WM_DEBUG
   10158 	uint16_t csum_wordaddr, valid_checksum;
   10159 #endif
   10160 	int i;
   10161 
   10162 	checksum = 0;
   10163 
   10164 	/* Don't check for I211 */
   10165 	if (sc->sc_type == WM_T_I211)
   10166 		return 0;
   10167 
   10168 #ifdef WM_DEBUG
   10169 	if (sc->sc_type == WM_T_PCH_LPT) {
   10170 		csum_wordaddr = NVM_OFF_COMPAT;
   10171 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10172 	} else {
   10173 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10174 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10175 	}
   10176 
   10177 	/* Dump EEPROM image for debug */
   10178 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10179 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10180 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10181 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10182 		if ((eeprom_data & valid_checksum) == 0) {
   10183 			DPRINTF(WM_DEBUG_NVM,
   10184 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10185 				device_xname(sc->sc_dev), eeprom_data,
   10186 				    valid_checksum));
   10187 		}
   10188 	}
   10189 
   10190 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10191 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10192 		for (i = 0; i < NVM_SIZE; i++) {
   10193 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10194 				printf("XXXX ");
   10195 			else
   10196 				printf("%04hx ", eeprom_data);
   10197 			if (i % 8 == 7)
   10198 				printf("\n");
   10199 		}
   10200 	}
   10201 
   10202 #endif /* WM_DEBUG */
   10203 
   10204 	for (i = 0; i < NVM_SIZE; i++) {
   10205 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10206 			return 1;
   10207 		checksum += eeprom_data;
   10208 	}
   10209 
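          	/*
          	 * Note: a mismatch is only reported under WM_DEBUG; it is
          	 * not treated as a fatal error here.
          	 */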
   10210 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10211 #ifdef WM_DEBUG
   10212 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10213 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10214 #endif
   10215 	}
   10216 
   10217 	return 0;
   10218 }
   10219 
   10220 static void
   10221 wm_nvm_version_invm(struct wm_softc *sc)
   10222 {
   10223 	uint32_t dword;
   10224 
   10225 	/*
    10226 	 * Linux's code to decode the version is very strange, so we don't
    10227 	 * follow that algorithm and just use word 61, as the documentation
    10228 	 * describes.  Perhaps it's not perfect though...
   10229 	 *
   10230 	 * Example:
   10231 	 *
   10232 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10233 	 */
   10234 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10235 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10236 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10237 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10238 }
   10239 
   10240 static void
   10241 wm_nvm_version(struct wm_softc *sc)
   10242 {
   10243 	uint16_t major, minor, build, patch;
   10244 	uint16_t uid0, uid1;
   10245 	uint16_t nvm_data;
   10246 	uint16_t off;
   10247 	bool check_version = false;
   10248 	bool check_optionrom = false;
   10249 	bool have_build = false;
   10250 
   10251 	/*
   10252 	 * Version format:
   10253 	 *
   10254 	 * XYYZ
   10255 	 * X0YZ
   10256 	 * X0YY
   10257 	 *
   10258 	 * Example:
   10259 	 *
   10260 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10261 	 *	82571	0x50a6	5.10.6?
   10262 	 *	82572	0x506a	5.6.10?
   10263 	 *	82572EI	0x5069	5.6.9?
   10264 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10265 	 *		0x2013	2.1.3?
    10266 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10267 	 */
   10268 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10269 	switch (sc->sc_type) {
   10270 	case WM_T_82571:
   10271 	case WM_T_82572:
   10272 	case WM_T_82574:
   10273 	case WM_T_82583:
   10274 		check_version = true;
   10275 		check_optionrom = true;
   10276 		have_build = true;
   10277 		break;
   10278 	case WM_T_82575:
   10279 	case WM_T_82576:
   10280 	case WM_T_82580:
   10281 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10282 			check_version = true;
   10283 		break;
   10284 	case WM_T_I211:
   10285 		wm_nvm_version_invm(sc);
   10286 		goto printver;
   10287 	case WM_T_I210:
   10288 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10289 			wm_nvm_version_invm(sc);
   10290 			goto printver;
   10291 		}
   10292 		/* FALLTHROUGH */
   10293 	case WM_T_I350:
   10294 	case WM_T_I354:
   10295 		check_version = true;
   10296 		check_optionrom = true;
   10297 		break;
   10298 	default:
   10299 		return;
   10300 	}
   10301 	if (check_version) {
   10302 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10303 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10304 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10305 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10306 			build = nvm_data & NVM_BUILD_MASK;
   10307 			have_build = true;
   10308 		} else
   10309 			minor = nvm_data & 0x00ff;
   10310 
    10311 		/* The minor digits are hex-coded decimal (e.g. 0x10 -> 10) */
   10312 		minor = (minor / 16) * 10 + (minor % 16);
   10313 		sc->sc_nvm_ver_major = major;
   10314 		sc->sc_nvm_ver_minor = minor;
   10315 
   10316 printver:
   10317 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10318 		    sc->sc_nvm_ver_minor);
   10319 		if (have_build) {
   10320 			sc->sc_nvm_ver_build = build;
   10321 			aprint_verbose(".%d", build);
   10322 		}
   10323 	}
   10324 	if (check_optionrom) {
   10325 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10326 		/* Option ROM Version */
   10327 		if ((off != 0x0000) && (off != 0xffff)) {
   10328 			off += NVM_COMBO_VER_OFF;
   10329 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10330 			wm_nvm_read(sc, off, 1, &uid0);
   10331 			if ((uid0 != 0) && (uid0 != 0xffff)
   10332 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10333 				/* 16bits */
   10334 				major = uid0 >> 8;
   10335 				build = (uid0 << 8) | (uid1 >> 8);
   10336 				patch = uid1 & 0x00ff;
   10337 				aprint_verbose(", option ROM Version %d.%d.%d",
   10338 				    major, build, patch);
   10339 			}
   10340 		}
   10341 	}
   10342 
   10343 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10344 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10345 }
   10346 
   10347 /*
   10348  * wm_nvm_read:
   10349  *
   10350  *	Read data from the serial EEPROM.
   10351  */
   10352 static int
   10353 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10354 {
   10355 	int rv;
   10356 
   10357 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10358 		return 1;
   10359 
   10360 	if (wm_nvm_acquire(sc))
   10361 		return 1;
   10362 
   10363 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10364 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10365 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   10366 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   10367 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   10368 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   10369 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   10370 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   10371 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   10372 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   10373 	else
   10374 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   10375 
   10376 	wm_nvm_release(sc);
   10377 	return rv;
   10378 }
   10379 
   10380 /*
   10381  * Hardware semaphores.
    10382  * Very complex...
   10383  */
   10384 
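          /*
           * wm_get_swsm_semaphore:
           *
           *	Acquire the SWSM semaphore: wait for SMBI to clear, then
           *	claim SWESMBI by setting it and reading it back.  Returns
           *	0 on success, 1 on timeout.
           */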
   10385 static int
   10386 wm_get_swsm_semaphore(struct wm_softc *sc)
   10387 {
   10388 	int32_t timeout;
   10389 	uint32_t swsm;
   10390 
   10391 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10392 		/* Get the SW semaphore. */
   10393 		timeout = sc->sc_nvm_wordsize + 1;
   10394 		while (timeout) {
   10395 			swsm = CSR_READ(sc, WMREG_SWSM);
   10396 
   10397 			if ((swsm & SWSM_SMBI) == 0)
   10398 				break;
   10399 
   10400 			delay(50);
   10401 			timeout--;
   10402 		}
   10403 
   10404 		if (timeout == 0) {
   10405 			aprint_error_dev(sc->sc_dev,
   10406 			    "could not acquire SWSM SMBI\n");
   10407 			return 1;
   10408 		}
   10409 	}
   10410 
   10411 	/* Get the FW semaphore. */
   10412 	timeout = sc->sc_nvm_wordsize + 1;
   10413 	while (timeout) {
   10414 		swsm = CSR_READ(sc, WMREG_SWSM);
   10415 		swsm |= SWSM_SWESMBI;
   10416 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   10417 		/* If we managed to set the bit we got the semaphore. */
   10418 		swsm = CSR_READ(sc, WMREG_SWSM);
   10419 		if (swsm & SWSM_SWESMBI)
   10420 			break;
   10421 
   10422 		delay(50);
   10423 		timeout--;
   10424 	}
   10425 
   10426 	if (timeout == 0) {
   10427 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   10428 		/* Release semaphores */
   10429 		wm_put_swsm_semaphore(sc);
   10430 		return 1;
   10431 	}
   10432 	return 0;
   10433 }
   10434 
   10435 static void
   10436 wm_put_swsm_semaphore(struct wm_softc *sc)
   10437 {
   10438 	uint32_t swsm;
   10439 
   10440 	swsm = CSR_READ(sc, WMREG_SWSM);
   10441 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   10442 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   10443 }
   10444 
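          /*
           * wm_get_swfw_semaphore:
           *
           *	Acquire the SW/FW sync bits in SW_FW_SYNC for the resources
           *	in 'mask', retrying for up to about a second.  Returns 0 on
           *	success.
           */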
   10445 static int
   10446 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10447 {
   10448 	uint32_t swfw_sync;
   10449 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   10450 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    10451 	int timeout;
   10452 
   10453 	for (timeout = 0; timeout < 200; timeout++) {
   10454 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10455 			if (wm_get_swsm_semaphore(sc)) {
   10456 				aprint_error_dev(sc->sc_dev,
   10457 				    "%s: failed to get semaphore\n",
   10458 				    __func__);
   10459 				return 1;
   10460 			}
   10461 		}
   10462 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10463 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   10464 			swfw_sync |= swmask;
   10465 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10466 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   10467 				wm_put_swsm_semaphore(sc);
   10468 			return 0;
   10469 		}
   10470 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   10471 			wm_put_swsm_semaphore(sc);
   10472 		delay(5000);
   10473 	}
   10474 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   10475 	    device_xname(sc->sc_dev), mask, swfw_sync);
   10476 	return 1;
   10477 }
   10478 
   10479 static void
   10480 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10481 {
   10482 	uint32_t swfw_sync;
   10483 
   10484 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10485 		while (wm_get_swsm_semaphore(sc) != 0)
   10486 			continue;
   10487 	}
   10488 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10489 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   10490 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10491 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   10492 		wm_put_swsm_semaphore(sc);
   10493 }
   10494 
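          /*
           * wm_get_swfwhw_semaphore:
           *
           *	Acquire the SW/FW/HW semaphore by claiming the MDIO software
           *	ownership bit in EXTCNFCTR.  Returns 0 on success.
           */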
   10495 static int
   10496 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   10497 {
   10498 	uint32_t ext_ctrl;
    10499 	int timeout;
   10500 
   10501 	for (timeout = 0; timeout < 200; timeout++) {
   10502 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10503 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10504 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10505 
   10506 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10507 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   10508 			return 0;
   10509 		delay(5000);
   10510 	}
   10511 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   10512 	    device_xname(sc->sc_dev), ext_ctrl);
   10513 	return 1;
   10514 }
   10515 
   10516 static void
   10517 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   10518 {
   10519 	uint32_t ext_ctrl;
   10520 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10521 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10522 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10523 }
   10524 
   10525 static int
   10526 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   10527 {
   10528 	int i = 0;
   10529 	uint32_t reg;
   10530 
   10531 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10532 	do {
   10533 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   10534 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   10535 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10536 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   10537 			break;
   10538 		delay(2*1000);
   10539 		i++;
   10540 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   10541 
   10542 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   10543 		wm_put_hw_semaphore_82573(sc);
   10544 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   10545 		    device_xname(sc->sc_dev));
   10546 		return -1;
   10547 	}
   10548 
   10549 	return 0;
   10550 }
   10551 
   10552 static void
   10553 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   10554 {
   10555 	uint32_t reg;
   10556 
   10557 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10558 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10559 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   10560 }
   10561 
   10562 /*
   10563  * Management mode and power management related subroutines.
   10564  * BMC, AMT, suspend/resume and EEE.
   10565  */
   10566 
   10567 static int
   10568 wm_check_mng_mode(struct wm_softc *sc)
   10569 {
   10570 	int rv;
   10571 
   10572 	switch (sc->sc_type) {
   10573 	case WM_T_ICH8:
   10574 	case WM_T_ICH9:
   10575 	case WM_T_ICH10:
   10576 	case WM_T_PCH:
   10577 	case WM_T_PCH2:
   10578 	case WM_T_PCH_LPT:
   10579 		rv = wm_check_mng_mode_ich8lan(sc);
   10580 		break;
   10581 	case WM_T_82574:
   10582 	case WM_T_82583:
   10583 		rv = wm_check_mng_mode_82574(sc);
   10584 		break;
   10585 	case WM_T_82571:
   10586 	case WM_T_82572:
   10587 	case WM_T_82573:
   10588 	case WM_T_80003:
   10589 		rv = wm_check_mng_mode_generic(sc);
   10590 		break;
   10591 	default:
    10592 		/* nothing to do */
   10593 		rv = 0;
   10594 		break;
   10595 	}
   10596 
   10597 	return rv;
   10598 }
   10599 
   10600 static int
   10601 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   10602 {
   10603 	uint32_t fwsm;
   10604 
   10605 	fwsm = CSR_READ(sc, WMREG_FWSM);
   10606 
   10607 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   10608 		return 1;
   10609 
   10610 	return 0;
   10611 }
   10612 
   10613 static int
   10614 wm_check_mng_mode_82574(struct wm_softc *sc)
   10615 {
   10616 	uint16_t data;
   10617 
   10618 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   10619 
   10620 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   10621 		return 1;
   10622 
   10623 	return 0;
   10624 }
   10625 
   10626 static int
   10627 wm_check_mng_mode_generic(struct wm_softc *sc)
   10628 {
   10629 	uint32_t fwsm;
   10630 
   10631 	fwsm = CSR_READ(sc, WMREG_FWSM);
   10632 
   10633 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   10634 		return 1;
   10635 
   10636 	return 0;
   10637 }
   10638 
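          /*
           * wm_enable_mng_pass_thru:
           *
           *	Return 1 if management pass-through should be enabled, i.e.
           *	the BMC receives TCO traffic and the firmware is in a
           *	pass-through (not ASF) management mode.
           */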
   10639 static int
   10640 wm_enable_mng_pass_thru(struct wm_softc *sc)
   10641 {
   10642 	uint32_t manc, fwsm, factps;
   10643 
   10644 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   10645 		return 0;
   10646 
   10647 	manc = CSR_READ(sc, WMREG_MANC);
   10648 
   10649 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   10650 		device_xname(sc->sc_dev), manc));
   10651 	if ((manc & MANC_RECV_TCO_EN) == 0)
   10652 		return 0;
   10653 
   10654 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   10655 		fwsm = CSR_READ(sc, WMREG_FWSM);
   10656 		factps = CSR_READ(sc, WMREG_FACTPS);
   10657 		if (((factps & FACTPS_MNGCG) == 0)
   10658 		    && ((fwsm & FWSM_MODE_MASK)
   10659 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   10660 			return 1;
    10661 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   10662 		uint16_t data;
   10663 
   10664 		factps = CSR_READ(sc, WMREG_FACTPS);
   10665 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   10666 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   10667 			device_xname(sc->sc_dev), factps, data));
   10668 		if (((factps & FACTPS_MNGCG) == 0)
   10669 		    && ((data & NVM_CFG2_MNGM_MASK)
   10670 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   10671 			return 1;
   10672 	} else if (((manc & MANC_SMBUS_EN) != 0)
   10673 	    && ((manc & MANC_ASF_EN) == 0))
   10674 		return 1;
   10675 
   10676 	return 0;
   10677 }
   10678 
   10679 static int
   10680 wm_check_reset_block(struct wm_softc *sc)
   10681 {
   10682 	uint32_t reg;
   10683 
   10684 	switch (sc->sc_type) {
   10685 	case WM_T_ICH8:
   10686 	case WM_T_ICH9:
   10687 	case WM_T_ICH10:
   10688 	case WM_T_PCH:
   10689 	case WM_T_PCH2:
   10690 	case WM_T_PCH_LPT:
   10691 		reg = CSR_READ(sc, WMREG_FWSM);
   10692 		if ((reg & FWSM_RSPCIPHY) != 0)
   10693 			return 0;
   10694 		else
   10695 			return -1;
   10696 		break;
   10697 	case WM_T_82571:
   10698 	case WM_T_82572:
   10699 	case WM_T_82573:
   10700 	case WM_T_82574:
   10701 	case WM_T_82583:
   10702 	case WM_T_80003:
   10703 		reg = CSR_READ(sc, WMREG_MANC);
   10704 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   10705 			return -1;
   10706 		else
   10707 			return 0;
   10708 		break;
   10709 	default:
   10710 		/* no problem */
   10711 		break;
   10712 	}
   10713 
   10714 	return 0;
   10715 }
   10716 
   10717 static void
   10718 wm_get_hw_control(struct wm_softc *sc)
   10719 {
   10720 	uint32_t reg;
   10721 
   10722 	switch (sc->sc_type) {
   10723 	case WM_T_82573:
   10724 		reg = CSR_READ(sc, WMREG_SWSM);
   10725 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   10726 		break;
   10727 	case WM_T_82571:
   10728 	case WM_T_82572:
   10729 	case WM_T_82574:
   10730 	case WM_T_82583:
   10731 	case WM_T_80003:
   10732 	case WM_T_ICH8:
   10733 	case WM_T_ICH9:
   10734 	case WM_T_ICH10:
   10735 	case WM_T_PCH:
   10736 	case WM_T_PCH2:
   10737 	case WM_T_PCH_LPT:
   10738 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10739 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   10740 		break;
   10741 	default:
   10742 		break;
   10743 	}
   10744 }
   10745 
   10746 static void
   10747 wm_release_hw_control(struct wm_softc *sc)
   10748 {
   10749 	uint32_t reg;
   10750 
   10751 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   10752 		return;
   10753 
   10754 	if (sc->sc_type == WM_T_82573) {
   10755 		reg = CSR_READ(sc, WMREG_SWSM);
   10756 		reg &= ~SWSM_DRV_LOAD;
    10757 		CSR_WRITE(sc, WMREG_SWSM, reg);
   10758 	} else {
   10759 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10760 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   10761 	}
   10762 }
   10763 
   10764 static void
   10765 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   10766 {
   10767 	uint32_t reg;
   10768 
   10769 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10770 
   10771 	if (on != 0)
   10772 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   10773 	else
   10774 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   10775 
   10776 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   10777 }
   10778 
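          /*
           * wm_smbustopci:
           *
           *	If no valid firmware is present and the PHY reset is not
           *	blocked, toggle LANPHYPC to force the PHY interconnect from
           *	SMBus mode back to PCIe mode.
           */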
   10779 static void
   10780 wm_smbustopci(struct wm_softc *sc)
   10781 {
   10782 	uint32_t fwsm;
   10783 
   10784 	fwsm = CSR_READ(sc, WMREG_FWSM);
   10785 	if (((fwsm & FWSM_FW_VALID) == 0)
   10786 	    && ((wm_check_reset_block(sc) == 0))) {
   10787 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   10788 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   10789 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10790 		CSR_WRITE_FLUSH(sc);
   10791 		delay(10);
   10792 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   10793 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10794 		CSR_WRITE_FLUSH(sc);
   10795 		delay(50*1000);
   10796 
   10797 		/*
   10798 		 * Gate automatic PHY configuration by hardware on non-managed
   10799 		 * 82579
   10800 		 */
   10801 		if (sc->sc_type == WM_T_PCH2)
   10802 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   10803 	}
   10804 }
   10805 
   10806 static void
   10807 wm_init_manageability(struct wm_softc *sc)
   10808 {
   10809 
   10810 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   10811 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   10812 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   10813 
   10814 		/* Disable hardware interception of ARP */
   10815 		manc &= ~MANC_ARP_EN;
   10816 
   10817 		/* Enable receiving management packets to the host */
   10818 		if (sc->sc_type >= WM_T_82571) {
   10819 			manc |= MANC_EN_MNG2HOST;
    10820 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   10821 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   10822 		}
   10823 
   10824 		CSR_WRITE(sc, WMREG_MANC, manc);
   10825 	}
   10826 }
   10827 
   10828 static void
   10829 wm_release_manageability(struct wm_softc *sc)
   10830 {
   10831 
   10832 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   10833 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   10834 
   10835 		manc |= MANC_ARP_EN;
   10836 		if (sc->sc_type >= WM_T_82571)
   10837 			manc &= ~MANC_EN_MNG2HOST;
   10838 
   10839 		CSR_WRITE(sc, WMREG_MANC, manc);
   10840 	}
   10841 }
   10842 
   10843 static void
   10844 wm_get_wakeup(struct wm_softc *sc)
   10845 {
   10846 
   10847 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   10848 	switch (sc->sc_type) {
   10849 	case WM_T_82573:
   10850 	case WM_T_82583:
   10851 		sc->sc_flags |= WM_F_HAS_AMT;
   10852 		/* FALLTHROUGH */
   10853 	case WM_T_80003:
   10854 	case WM_T_82541:
   10855 	case WM_T_82547:
   10856 	case WM_T_82571:
   10857 	case WM_T_82572:
   10858 	case WM_T_82574:
   10859 	case WM_T_82575:
   10860 	case WM_T_82576:
   10861 	case WM_T_82580:
   10862 	case WM_T_I350:
   10863 	case WM_T_I354:
   10864 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   10865 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   10866 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   10867 		break;
   10868 	case WM_T_ICH8:
   10869 	case WM_T_ICH9:
   10870 	case WM_T_ICH10:
   10871 	case WM_T_PCH:
   10872 	case WM_T_PCH2:
   10873 	case WM_T_PCH_LPT:
   10874 		sc->sc_flags |= WM_F_HAS_AMT;
   10875 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   10876 		break;
   10877 	default:
   10878 		break;
   10879 	}
   10880 
   10881 	/* 1: HAS_MANAGE */
   10882 	if (wm_enable_mng_pass_thru(sc) != 0)
   10883 		sc->sc_flags |= WM_F_HAS_MANAGE;
   10884 
   10885 #ifdef WM_DEBUG
   10886 	printf("\n");
   10887 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   10888 		printf("HAS_AMT,");
   10889 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   10890 		printf("ARC_SUBSYS_VALID,");
   10891 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   10892 		printf("ASF_FIRMWARE_PRES,");
   10893 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   10894 		printf("HAS_MANAGE,");
   10895 	printf("\n");
   10896 #endif
   10897 	/*
    10898 	 * Note that the WOL flags are set after the EEPROM stuff has
    10899 	 * been reset.
   10900 	 */
   10901 }
   10902 
   10903 #ifdef WM_WOL
   10904 /* WOL in the newer chipset interfaces (pchlan) */
   10905 static void
   10906 wm_enable_phy_wakeup(struct wm_softc *sc)
   10907 {
   10908 #if 0
   10909 	uint16_t preg;
   10910 
   10911 	/* Copy MAC RARs to PHY RARs */
   10912 
   10913 	/* Copy MAC MTA to PHY MTA */
   10914 
   10915 	/* Configure PHY Rx Control register */
   10916 
   10917 	/* Enable PHY wakeup in MAC register */
   10918 
   10919 	/* Configure and enable PHY wakeup in PHY registers */
   10920 
   10921 	/* Activate PHY wakeup */
   10922 
   10923 	/* XXX */
   10924 #endif
   10925 }
   10926 
   10927 /* Power down workaround on D3 */
   10928 static void
   10929 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   10930 {
   10931 	uint32_t reg;
   10932 	int i;
   10933 
   10934 	for (i = 0; i < 2; i++) {
   10935 		/* Disable link */
   10936 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   10937 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   10938 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   10939 
   10940 		/*
   10941 		 * Call gig speed drop workaround on Gig disable before
   10942 		 * accessing any PHY registers
   10943 		 */
   10944 		if (sc->sc_type == WM_T_ICH8)
   10945 			wm_gig_downshift_workaround_ich8lan(sc);
   10946 
   10947 		/* Write VR power-down enable */
   10948 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   10949 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   10950 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   10951 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   10952 
   10953 		/* Read it back and test */
   10954 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   10955 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   10956 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   10957 			break;
   10958 
   10959 		/* Issue PHY reset and repeat at most one more time */
   10960 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10961 	}
   10962 }
   10963 
   10964 static void
   10965 wm_enable_wakeup(struct wm_softc *sc)
   10966 {
   10967 	uint32_t reg, pmreg;
   10968 	pcireg_t pmode;
   10969 
   10970 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   10971 		&pmreg, NULL) == 0)
   10972 		return;
   10973 
   10974 	/* Advertise the wakeup capability */
   10975 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   10976 	    | CTRL_SWDPIN(3));
   10977 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   10978 
   10979 	/* ICH workaround */
   10980 	switch (sc->sc_type) {
   10981 	case WM_T_ICH8:
   10982 	case WM_T_ICH9:
   10983 	case WM_T_ICH10:
   10984 	case WM_T_PCH:
   10985 	case WM_T_PCH2:
   10986 	case WM_T_PCH_LPT:
   10987 		/* Disable gig during WOL */
   10988 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   10989 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   10990 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   10991 		if (sc->sc_type == WM_T_PCH)
   10992 			wm_gmii_reset(sc);
   10993 
   10994 		/* Power down workaround */
   10995 		if (sc->sc_phytype == WMPHY_82577) {
   10996 			struct mii_softc *child;
   10997 
   10998 			/* Assume that the PHY is copper */
   10999 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
    11000 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   11001 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11002 				    (768 << 5) | 25, 0x0444); /* magic num */
   11003 		}
   11004 		break;
   11005 	default:
   11006 		break;
   11007 	}
   11008 
   11009 	/* Keep the laser running on fiber adapters */
   11010 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11011 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11012 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11013 		reg |= CTRL_EXT_SWDPIN(3);
   11014 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11015 	}
   11016 
   11017 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11018 #if 0	/* for the multicast packet */
   11019 	reg |= WUFC_MC;
   11020 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11021 #endif
   11022 
   11023 	if (sc->sc_type == WM_T_PCH) {
   11024 		wm_enable_phy_wakeup(sc);
   11025 	} else {
   11026 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11027 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11028 	}
   11029 
   11030 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11031 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11032 		|| (sc->sc_type == WM_T_PCH2))
   11033 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11034 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11035 
   11036 	/* Request PME */
   11037 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11038 #if 0
   11039 	/* Disable WOL */
   11040 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11041 #else
   11042 	/* For WOL */
   11043 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11044 #endif
   11045 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11046 }
   11047 #endif /* WM_WOL */
   11048 
   11049 /* EEE */
   11050 
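          /*
           * wm_set_eee_i350:
           *
           *	Enable or disable Energy Efficient Ethernet (IEEE 802.3az)
           *	advertisement and LPI generation according to WM_F_EEE.
           */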
   11051 static void
   11052 wm_set_eee_i350(struct wm_softc *sc)
   11053 {
   11054 	uint32_t ipcnfg, eeer;
   11055 
   11056 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11057 	eeer = CSR_READ(sc, WMREG_EEER);
   11058 
   11059 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11060 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11061 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11062 		    | EEER_LPI_FC);
   11063 	} else {
   11064 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11065 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11066 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11067 		    | EEER_LPI_FC);
   11068 	}
   11069 
   11070 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11071 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11072 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11073 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11074 }
   11075 
   11076 /*
   11077  * Workarounds (mainly PHY related).
   11078  * Basically, PHY's workarounds are in the PHY drivers.
   11079  */
   11080 
   11081 /* Work-around for 82566 Kumeran PCS lock loss */
   11082 static void
   11083 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11084 {
   11085 	int miistatus, active, i;
   11086 	int reg;
   11087 
   11088 	miistatus = sc->sc_mii.mii_media_status;
   11089 
   11090 	/* If the link is not up, do nothing */
    11091 	if ((miistatus & IFM_ACTIVE) == 0)
   11092 		return;
   11093 
   11094 	active = sc->sc_mii.mii_media_active;
   11095 
   11096 	/* Nothing to do if the link is other than 1Gbps */
   11097 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11098 		return;
   11099 
   11100 	for (i = 0; i < 10; i++) {
    11101 		/* Read twice; the first read clears the latched status */
   11102 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11103 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
    11104 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11105 			goto out;	/* GOOD! */
   11106 
   11107 		/* Reset the PHY */
   11108 		wm_gmii_reset(sc);
   11109 		delay(5*1000);
   11110 	}
   11111 
   11112 	/* Disable GigE link negotiation */
   11113 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11114 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11115 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11116 
   11117 	/*
   11118 	 * Call gig speed drop workaround on Gig disable before accessing
   11119 	 * any PHY registers.
   11120 	 */
   11121 	wm_gig_downshift_workaround_ich8lan(sc);
   11122 
   11123 out:
   11124 	return;
   11125 }
   11126 
   11127 /* WOL from S5 stops working */
   11128 static void
   11129 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11130 {
   11131 	uint16_t kmrn_reg;
   11132 
   11133 	/* Only for igp3 */
   11134 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11135 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11136 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11137 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11138 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11139 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11140 	}
   11141 }
   11142 
   11143 /*
   11144  * Workaround for pch's PHYs
   11145  * XXX should be moved to new PHY driver?
   11146  */
   11147 static void
   11148 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   11149 {
   11150 	if (sc->sc_phytype == WMPHY_82577)
   11151 		wm_set_mdio_slow_mode_hv(sc);
   11152 
   11153 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   11154 
   11155 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   11156 
   11157 	/* 82578 */
   11158 	if (sc->sc_phytype == WMPHY_82578) {
   11159 		/* PCH rev. < 3 */
   11160 		if (sc->sc_rev < 3) {
   11161 			/* XXX 6 bit shift? Why? Is it page2? */
   11162 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   11163 			    0x66c0);
   11164 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   11165 			    0xffff);
   11166 		}
   11167 
   11168 		/* XXX phy rev. < 2 */
   11169 	}
   11170 
   11171 	/* Select page 0 */
   11172 
   11173 	/* XXX acquire semaphore */
   11174 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   11175 	/* XXX release semaphore */
   11176 
   11177 	/*
   11178 	 * Configure the K1 Si workaround during phy reset assuming there is
   11179 	 * link so that it disables K1 if link is in 1Gbps.
   11180 	 */
   11181 	wm_k1_gig_workaround_hv(sc, 1);
   11182 }
   11183 
   11184 static void
   11185 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   11186 {
   11187 
   11188 	wm_set_mdio_slow_mode_hv(sc);
   11189 }
   11190 
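          /*
           * wm_k1_gig_workaround_hv:
           *
           *	K1 (a Kumeran power-saving state) must not be enabled while
           *	the link is up at 1Gbps, so pick the K1 setting from the
           *	link state and apply the link stall fix.
           */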
   11191 static void
   11192 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   11193 {
   11194 	int k1_enable = sc->sc_nvm_k1_enabled;
   11195 
   11196 	/* XXX acquire semaphore */
   11197 
   11198 	if (link) {
   11199 		k1_enable = 0;
   11200 
   11201 		/* Link stall fix for link up */
   11202 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   11203 	} else {
   11204 		/* Link stall fix for link down */
   11205 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   11206 	}
   11207 
   11208 	wm_configure_k1_ich8lan(sc, k1_enable);
   11209 
   11210 	/* XXX release semaphore */
   11211 }
   11212 
   11213 static void
   11214 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   11215 {
   11216 	uint32_t reg;
   11217 
   11218 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   11219 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   11220 	    reg | HV_KMRN_MDIO_SLOW);
   11221 }
   11222 
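          /*
           * wm_configure_k1_ich8lan:
           *
           *	Write the K1 enable bit over the Kumeran interface, then
           *	briefly force the MAC speed (FRCSPD + SPD_BYPS) so the
           *	setting takes effect, and restore CTRL/CTRL_EXT afterwards.
           */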
   11223 static void
   11224 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   11225 {
   11226 	uint32_t ctrl, ctrl_ext, tmp;
   11227 	uint16_t kmrn_reg;
   11228 
   11229 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   11230 
   11231 	if (k1_enable)
   11232 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   11233 	else
   11234 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   11235 
   11236 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   11237 
   11238 	delay(20);
   11239 
   11240 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11241 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11242 
   11243 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   11244 	tmp |= CTRL_FRCSPD;
   11245 
   11246 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   11247 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   11248 	CSR_WRITE_FLUSH(sc);
   11249 	delay(20);
   11250 
   11251 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   11252 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11253 	CSR_WRITE_FLUSH(sc);
   11254 	delay(20);
   11255 }
   11256 
    11257 /* Special case - the 82575 needs its init done manually ... */
   11258 static void
   11259 wm_reset_init_script_82575(struct wm_softc *sc)
   11260 {
   11261 	/*
    11262 	 * Remark: this is untested code - we have no board without EEPROM.
    11263 	 * It is the same setup as mentioned in the FreeBSD driver for the i82575.
   11264 	 */
   11265 
   11266 	/* SerDes configuration via SERDESCTRL */
   11267 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   11268 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   11269 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   11270 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   11271 
   11272 	/* CCM configuration via CCMCTL register */
   11273 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   11274 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   11275 
   11276 	/* PCIe lanes configuration */
   11277 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   11278 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   11279 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   11280 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   11281 
   11282 	/* PCIe PLL Configuration */
   11283 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   11284 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   11285 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   11286 }
   11287 
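          /*
           * wm_reset_mdicnfg_82580:
           *
           *	After reset, restore the external/combined MDIO configuration
           *	in MDICNFG from the port's CFG3 NVM word (SGMII parts only).
           */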
   11288 static void
   11289 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   11290 {
   11291 	uint32_t reg;
   11292 	uint16_t nvmword;
   11293 	int rv;
   11294 
   11295 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11296 		return;
   11297 
   11298 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   11299 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   11300 	if (rv != 0) {
   11301 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   11302 		    __func__);
   11303 		return;
   11304 	}
   11305 
   11306 	reg = CSR_READ(sc, WMREG_MDICNFG);
   11307 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   11308 		reg |= MDICNFG_DEST;
   11309 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   11310 		reg |= MDICNFG_COM_MDIO;
   11311 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   11312 }
   11313 
   11314 /*
   11315  * I210 Errata 25 and I211 Errata 10
   11316  * Slow System Clock.
   11317  */
   11318 static void
   11319 wm_pll_workaround_i210(struct wm_softc *sc)
   11320 {
   11321 	uint32_t mdicnfg, wuc;
   11322 	uint32_t reg;
   11323 	pcireg_t pcireg;
   11324 	uint32_t pmreg;
   11325 	uint16_t nvmword, tmp_nvmword;
   11326 	int phyval;
   11327 	bool wa_done = false;
   11328 	int i;
   11329 
   11330 	/* Save WUC and MDICNFG registers */
   11331 	wuc = CSR_READ(sc, WMREG_WUC);
   11332 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   11333 
   11334 	reg = mdicnfg & ~MDICNFG_DEST;
   11335 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   11336 
   11337 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   11338 		nvmword = INVM_DEFAULT_AL;
   11339 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   11340 
   11341 	/* Get Power Management cap offset */
   11342 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11343 		&pmreg, NULL) == 0)
   11344 		return;
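          	/*
          	 * If the PHY's PLL came up unconfigured, reset the PHY,
          	 * bounce the device through D3 with a patched EEARBC
          	 * autoload word and check again, up to WM_MAX_PLL_TRIES
          	 * times.
          	 */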
   11345 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   11346 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   11347 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   11348 
   11349 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   11350 			break; /* OK */
   11351 		}
   11352 
   11353 		wa_done = true;
   11354 		/* Directly reset the internal PHY */
   11355 		reg = CSR_READ(sc, WMREG_CTRL);
   11356 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   11357 
   11358 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11359 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   11360 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11361 
   11362 		CSR_WRITE(sc, WMREG_WUC, 0);
   11363 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   11364 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   11365 
   11366 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   11367 		    pmreg + PCI_PMCSR);
   11368 		pcireg |= PCI_PMCSR_STATE_D3;
   11369 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   11370 		    pmreg + PCI_PMCSR, pcireg);
   11371 		delay(1000);
   11372 		pcireg &= ~PCI_PMCSR_STATE_D3;
   11373 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   11374 		    pmreg + PCI_PMCSR, pcireg);
   11375 
   11376 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   11377 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   11378 
   11379 		/* Restore WUC register */
   11380 		CSR_WRITE(sc, WMREG_WUC, wuc);
   11381 	}
   11382 
   11383 	/* Restore MDICNFG setting */
   11384 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   11385 	if (wa_done)
   11386 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   11387 }
   11388