/*	$NetBSD: if_wm.c,v 1.372 2015/10/22 06:01:41 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- LPLU other than PCH*
 *	- TX Multi queue
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.372 2015/10/22 06:01:41 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
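
/*
 * Usage sketch for DPRINTF (illustrative only, not part of the driver):
 * the first argument selects a debug class bit and the second is a
 * parenthesized printf() argument list, so the whole statement compiles
 * away when WM_DEBUG is not defined.  "link" is a hypothetical local.
 */
#if 0	/* example only */
	DPRINTF(WM_DEBUG_LINK, ("%s: link is %s\n",
	    device_xname(sc->sc_dev), link ? "up" : "down"));
#endif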

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

#ifdef __HAVE_PCI_MSI_MSIX
#define WM_MSI_MSIX	1 /* Enable by default */
#endif

/*
 * The maximum number of interrupts this driver can use.
 */
#define WM_MAX_NTXINTR		16
#define WM_MAX_NRXINTR		16
#define WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
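
/*
 * Illustrative sketch (not part of the driver): because txq_ndesc is a
 * power of two, advancing a ring index is a mask instead of a modulo.
 * With a 4096-entry ring, WM_NEXTTX() wraps the last index back to 0.
 * "idx" is a hypothetical local.
 */
#if 0	/* example only */
	int idx = WM_NTXDESC(txq) - 1;	/* 4095 on >= 82544 */
	idx = WM_NEXTTX(txq, idx);	/* (4096 & 4095) == 0 */
#endif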

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
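
/*
 * Illustrative sketch (not part of the driver): the same mask trick also
 * walks the Rx ring backwards; (-1 & 255) == 255, so WM_PREVRX(0) wraps
 * around to the last descriptor.  "last" and "first" are hypothetical.
 */
#if 0	/* example only */
	int last = WM_PREVRX(0);	/* 255 */
	int first = WM_NEXTRX(last);	/* back to 0 */
#endif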

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;

	int txq_id;			/* index of this transmit queue */
	int txq_intr_idx;		/* index in the MSI-X table */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
	size_t txq_desc_size;		/* control data size */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/* XXX which event counter is required? */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;

	int rxq_id;			/* index of this receive queue */
	int rxq_intr_idx;		/* index in the MSI-X table */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx descriptor/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_ntxqueues;
	struct wm_txqueue *sc_txq;

	int sc_nrxqueues;
	struct wm_rxqueue *sc_rxq;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
};

#define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
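
/*
 * Illustrative usage sketch (not part of the driver): the lock macros
 * degrade to no-ops when the mutex pointer is NULL (the non-MPSAFE
 * case), so callers bracket queue work unconditionally and assert
 * ownership with the *_LOCKED() forms.
 */
#if 0	/* example only */
	WM_TX_LOCK(txq);
	KASSERT(WM_TX_LOCKED(txq));
	/* ... touch txq_free, txq_next, etc. ... */
	WM_TX_UNLOCK(txq);
#endif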

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
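
/*
 * Illustrative sketch (not part of the driver): rxq_tailp always points
 * at the m_next slot of the last mbuf (or at rxq_head when the chain is
 * empty), so appending a buffer of a multi-buffer jumbo frame is O(1).
 * "m0" and "m1" are hypothetical mbufs.
 */
#if 0	/* example only */
	WM_RXCHAIN_RESET(rxq);		/* head = NULL, tailp = &head */
	WM_RXCHAIN_LINK(rxq, m0);	/* head = m0, tailp = &m0->m_next */
	WM_RXCHAIN_LINK(rxq, m1);	/* m0->m_next = m1, tailp = &m1->m_next */
#endif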

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
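
/*
 * Illustrative sketch (not part of the driver): a typical read-modify-
 * write of a CSR, followed by CSR_WRITE_FLUSH() to force the posted
 * write out to the chip before any subsequent delay() is trusted.
 * "reg" is a hypothetical local; WMREG_CTRL/CTRL_SLU come from
 * if_wmreg.h.
 */
#if 0	/* example only */
	uint32_t reg = CSR_READ(sc, WMREG_CTRL);
	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_SLU);
	CSR_WRITE_FLUSH(sc);
#endif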

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
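
/*
 * Illustrative sketch (not part of the driver): the chip takes 64-bit
 * descriptor base addresses as separate low/high 32-bit registers, so
 * ring setup splits the DMA address of descriptor 0 with these macros
 * (WMREG_TDBAL/WMREG_TDBAH are the Tx base registers from if_wmreg.h):
 */
#if 0	/* example only */
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
#endif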

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
#ifdef WM_MSI_MSIX
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
#endif
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
#ifdef WM_MSI_MSIX
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);
#endif

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using the EERD register */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};
   1290 
   1291 #ifdef WM_EVENT_COUNTERS
   1292 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1293 #endif /* WM_EVENT_COUNTERS */
   1294 
   1295 
   1296 /*
   1297  * Register read/write functions.
   1298  * Other than CSR_{READ|WRITE}().
   1299  */
   1300 
   1301 #if 0 /* Not currently used */
   1302 static inline uint32_t
   1303 wm_io_read(struct wm_softc *sc, int reg)
   1304 {
   1305 
   1306 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1307 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1308 }
   1309 #endif
   1310 
   1311 static inline void
   1312 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1313 {
   1314 
   1315 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1316 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1317 }
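
/*
 * Illustrative note (editorial): I/O-mapped register access on this
 * family is indirect.  The register offset is first written through the
 * window at BAR offset 0 and the data is then transferred through the
 * window at offset 4 (the IOADDR/IODATA pair in Intel's manuals), which
 * is exactly what wm_io_read()/wm_io_write() above do.
 */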
   1318 
   1319 static inline void
   1320 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1321     uint32_t data)
   1322 {
   1323 	uint32_t regval;
   1324 	int i;
   1325 
   1326 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1327 
   1328 	CSR_WRITE(sc, reg, regval);
   1329 
   1330 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1331 		delay(5);
   1332 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1333 			break;
   1334 	}
   1335 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1336 		aprint_error("%s: WARNING:"
   1337 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1338 		    device_xname(sc->sc_dev), reg);
   1339 	}
   1340 }
   1341 
   1342 static inline void
   1343 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1344 {
   1345 	wa->wa_low = htole32(v & 0xffffffffU);
   1346 	if (sizeof(bus_addr_t) == 8)
   1347 		wa->wa_high = htole32((uint64_t) v >> 32);
   1348 	else
   1349 		wa->wa_high = 0;
   1350 }
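
/*
 * Worked example for wm_set_dma_addr() (illustrative only): for a
 * 64-bit bus address v = 0x123456789a the descriptor words become
 *	wa_low  = htole32(0x3456789a)	(v & 0xffffffff)
 *	wa_high = htole32(0x00000012)	(v >> 32)
 * On a build where bus_addr_t is 32 bits the high word is simply 0.
 */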
   1351 
   1352 /*
   1353  * Descriptor sync/init functions.
   1354  */
   1355 static inline void
   1356 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1357 {
   1358 	struct wm_softc *sc = txq->txq_sc;
   1359 
   1360 	/* If it will wrap around, sync to the end of the ring. */
   1361 	if ((start + num) > WM_NTXDESC(txq)) {
   1362 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1363 		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
   1364 		    (WM_NTXDESC(txq) - start), ops);
   1365 		num -= (WM_NTXDESC(txq) - start);
   1366 		start = 0;
   1367 	}
   1368 
   1369 	/* Now sync whatever is left. */
   1370 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1371 	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
   1372 }
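
/*
 * Worked example for wm_cdtxsync() (illustrative only): with
 * WM_NTXDESC(txq) == 256, a call with start == 254 and num == 4 issues
 * two bus_dmamap_sync() calls, one covering descriptors 254-255 and a
 * second covering 0-1, because the range wraps past the end of the
 * descriptor ring.
 */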
   1373 
   1374 static inline void
   1375 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1376 {
   1377 	struct wm_softc *sc = rxq->rxq_sc;
   1378 
   1379 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1380 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1381 }
   1382 
   1383 static inline void
   1384 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1385 {
   1386 	struct wm_softc *sc = rxq->rxq_sc;
   1387 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1388 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1389 	struct mbuf *m = rxs->rxs_mbuf;
   1390 
   1391 	/*
   1392 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1393 	 * so that the payload after the Ethernet header is aligned
   1394 	 * to a 4-byte boundary.
    1395 	 *
   1396 	 * XXX BRAINDAMAGE ALERT!
   1397 	 * The stupid chip uses the same size for every buffer, which
   1398 	 * is set in the Receive Control register.  We are using the 2K
   1399 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1400 	 * reason, we can't "scoot" packets longer than the standard
   1401 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1402 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1403 	 * the upper layer copy the headers.
   1404 	 */
   1405 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1406 
   1407 	wm_set_dma_addr(&rxd->wrx_addr,
   1408 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1409 	rxd->wrx_len = 0;
   1410 	rxd->wrx_cksum = 0;
   1411 	rxd->wrx_status = 0;
   1412 	rxd->wrx_errors = 0;
   1413 	rxd->wrx_special = 0;
   1414 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1415 
   1416 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1417 }
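
/*
 * Worked example for the "scoot" above (illustrative only): with
 * sc_align_tweak == 2 the 14-byte Ethernet header starts at buffer
 * offset 2, so the IP header that follows begins at offset 16, a
 * 4-byte boundary.  With a tweak of 0 (large frames on strict-alignment
 * machines) the IP header lands at offset 14 and the upper layers must
 * copy the headers into alignment instead.
 */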
   1418 
   1419 /*
   1420  * Device driver interface functions and commonly used functions.
   1421  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1422  */
   1423 
   1424 /* Lookup supported device table */
   1425 static const struct wm_product *
   1426 wm_lookup(const struct pci_attach_args *pa)
   1427 {
   1428 	const struct wm_product *wmp;
   1429 
   1430 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1431 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1432 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1433 			return wmp;
   1434 	}
   1435 	return NULL;
   1436 }
   1437 
   1438 /* The match function (ca_match) */
   1439 static int
   1440 wm_match(device_t parent, cfdata_t cf, void *aux)
   1441 {
   1442 	struct pci_attach_args *pa = aux;
   1443 
   1444 	if (wm_lookup(pa) != NULL)
   1445 		return 1;
   1446 
   1447 	return 0;
   1448 }
   1449 
   1450 /* The attach function (ca_attach) */
   1451 static void
   1452 wm_attach(device_t parent, device_t self, void *aux)
   1453 {
   1454 	struct wm_softc *sc = device_private(self);
   1455 	struct pci_attach_args *pa = aux;
   1456 	prop_dictionary_t dict;
   1457 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1458 	pci_chipset_tag_t pc = pa->pa_pc;
   1459 #ifndef WM_MSI_MSIX
   1460 	pci_intr_handle_t ih;
   1461 	const char *intrstr = NULL;
   1462 	char intrbuf[PCI_INTRSTR_LEN];
   1463 #else
   1464 	int counts[PCI_INTR_TYPE_SIZE];
   1465 	pci_intr_type_t max_type;
   1466 #endif
   1467 	const char *eetype, *xname;
   1468 	bus_space_tag_t memt;
   1469 	bus_space_handle_t memh;
   1470 	bus_size_t memsize;
   1471 	int memh_valid;
   1472 	int i, error;
   1473 	const struct wm_product *wmp;
   1474 	prop_data_t ea;
   1475 	prop_number_t pn;
   1476 	uint8_t enaddr[ETHER_ADDR_LEN];
   1477 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1478 	pcireg_t preg, memtype;
   1479 	uint16_t eeprom_data, apme_mask;
   1480 	bool force_clear_smbi;
   1481 	uint32_t link_mode;
   1482 	uint32_t reg;
   1483 
   1484 	sc->sc_dev = self;
   1485 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1486 	sc->sc_stopping = false;
   1487 
   1488 	wmp = wm_lookup(pa);
   1489 #ifdef DIAGNOSTIC
   1490 	if (wmp == NULL) {
   1491 		printf("\n");
   1492 		panic("wm_attach: impossible");
   1493 	}
   1494 #endif
   1495 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1496 
   1497 	sc->sc_pc = pa->pa_pc;
   1498 	sc->sc_pcitag = pa->pa_tag;
   1499 
   1500 	if (pci_dma64_available(pa))
   1501 		sc->sc_dmat = pa->pa_dmat64;
   1502 	else
   1503 		sc->sc_dmat = pa->pa_dmat;
   1504 
   1505 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1506 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1507 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1508 
   1509 	sc->sc_type = wmp->wmp_type;
   1510 	if (sc->sc_type < WM_T_82543) {
   1511 		if (sc->sc_rev < 2) {
   1512 			aprint_error_dev(sc->sc_dev,
   1513 			    "i82542 must be at least rev. 2\n");
   1514 			return;
   1515 		}
   1516 		if (sc->sc_rev < 3)
   1517 			sc->sc_type = WM_T_82542_2_0;
   1518 	}
   1519 
   1520 	/*
   1521 	 * Disable MSI for Errata:
   1522 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1523 	 *
   1524 	 *  82544: Errata 25
   1525 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1526 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1527 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1528 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1529 	 *
   1530 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1531 	 *
   1532 	 *  82571 & 82572: Errata 63
   1533 	 */
   1534 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1535 	    || (sc->sc_type == WM_T_82572))
   1536 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1537 
   1538 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1539 	    || (sc->sc_type == WM_T_82580)
   1540 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1541 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1542 		sc->sc_flags |= WM_F_NEWQUEUE;
   1543 
   1544 	/* Set device properties (mactype) */
   1545 	dict = device_properties(sc->sc_dev);
   1546 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1547 
   1548 	/*
    1549 	 * Map the device.  All devices support memory-mapped access,
   1550 	 * and it is really required for normal operation.
   1551 	 */
   1552 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1553 	switch (memtype) {
   1554 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1555 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1556 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1557 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1558 		break;
   1559 	default:
   1560 		memh_valid = 0;
   1561 		break;
   1562 	}
   1563 
   1564 	if (memh_valid) {
   1565 		sc->sc_st = memt;
   1566 		sc->sc_sh = memh;
   1567 		sc->sc_ss = memsize;
   1568 	} else {
   1569 		aprint_error_dev(sc->sc_dev,
   1570 		    "unable to map device registers\n");
   1571 		return;
   1572 	}
   1573 
   1574 	/*
   1575 	 * In addition, i82544 and later support I/O mapped indirect
   1576 	 * register access.  It is not desirable (nor supported in
   1577 	 * this driver) to use it for normal operation, though it is
   1578 	 * required to work around bugs in some chip versions.
   1579 	 */
   1580 	if (sc->sc_type >= WM_T_82544) {
   1581 		/* First we have to find the I/O BAR. */
   1582 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1583 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1584 			if (memtype == PCI_MAPREG_TYPE_IO)
   1585 				break;
   1586 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1587 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1588 				i += 4;	/* skip high bits, too */
   1589 		}
   1590 		if (i < PCI_MAPREG_END) {
   1591 			/*
    1592 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1593 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1594 			 * That's not a problem, because the newer chips
    1595 			 * don't have this bug.
   1596 			 *
   1597 			 * The i8254x doesn't apparently respond when the
   1598 			 * I/O BAR is 0, which looks somewhat like it's not
   1599 			 * been configured.
   1600 			 */
   1601 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1602 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1603 				aprint_error_dev(sc->sc_dev,
   1604 				    "WARNING: I/O BAR at zero.\n");
   1605 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1606 					0, &sc->sc_iot, &sc->sc_ioh,
   1607 					NULL, &sc->sc_ios) == 0) {
   1608 				sc->sc_flags |= WM_F_IOH_VALID;
   1609 			} else {
   1610 				aprint_error_dev(sc->sc_dev,
   1611 				    "WARNING: unable to map I/O space\n");
   1612 			}
   1613 		}
   1614 
   1615 	}
   1616 
   1617 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1618 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1619 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1620 	if (sc->sc_type < WM_T_82542_2_1)
   1621 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1622 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1623 
   1624 	/* power up chip */
   1625 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1626 	    NULL)) && error != EOPNOTSUPP) {
   1627 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1628 		return;
   1629 	}
   1630 
   1631 #ifndef WM_MSI_MSIX
   1632 	sc->sc_ntxqueues = 1;
   1633 	sc->sc_nrxqueues = 1;
   1634 	error = wm_alloc_txrx_queues(sc);
   1635 	if (error) {
   1636 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   1637 		    error);
   1638 		return;
   1639 	}
   1640 
   1641 	/*
   1642 	 * Map and establish our interrupt.
   1643 	 */
   1644 	if (pci_intr_map(pa, &ih)) {
   1645 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
   1646 		return;
   1647 	}
   1648 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
   1649 #ifdef WM_MPSAFE
   1650 	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
   1651 #endif
   1652 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, ih, IPL_NET,
   1653 	    wm_intr_legacy, sc, device_xname(sc->sc_dev));
   1654 	if (sc->sc_ihs[0] == NULL) {
   1655 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
   1656 		if (intrstr != NULL)
   1657 			aprint_error(" at %s", intrstr);
   1658 		aprint_error("\n");
   1659 		return;
   1660 	}
   1661 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   1662 	sc->sc_nintrs = 1;
   1663 #else /* WM_MSI_MSIX */
   1664 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1665 	error = wm_alloc_txrx_queues(sc);
   1666 	if (error) {
   1667 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   1668 		    error);
   1669 		return;
   1670 	}
   1671 
   1672 	/* Allocation settings */
   1673 	max_type = PCI_INTR_TYPE_MSIX;
   1674 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   1675 	counts[PCI_INTR_TYPE_MSI] = 1;
   1676 	counts[PCI_INTR_TYPE_INTX] = 1;
   1677 
   1678 alloc_retry:
   1679 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1680 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1681 		return;
   1682 	}
   1683 
   1684 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1685 		error = wm_setup_msix(sc);
   1686 		if (error) {
   1687 			pci_intr_release(pc, sc->sc_intrs,
   1688 			    counts[PCI_INTR_TYPE_MSIX]);
   1689 
   1690 			/* Setup for MSI: Disable MSI-X */
   1691 			max_type = PCI_INTR_TYPE_MSI;
   1692 			counts[PCI_INTR_TYPE_MSI] = 1;
   1693 			counts[PCI_INTR_TYPE_INTX] = 1;
   1694 			goto alloc_retry;
   1695 		}
    1696 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1697 		error = wm_setup_legacy(sc);
   1698 		if (error) {
   1699 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1700 			    counts[PCI_INTR_TYPE_MSI]);
   1701 
   1702 			/* The next try is for INTx: Disable MSI */
   1703 			max_type = PCI_INTR_TYPE_INTX;
   1704 			counts[PCI_INTR_TYPE_INTX] = 1;
   1705 			goto alloc_retry;
   1706 		}
   1707 	} else {
   1708 		error = wm_setup_legacy(sc);
   1709 		if (error) {
   1710 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1711 			    counts[PCI_INTR_TYPE_INTX]);
   1712 			return;
   1713 		}
   1714 	}
   1715 #endif /* WM_MSI_MSIX */
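	/*
	 * Illustrative note on the retry logic above: interrupt setup
	 * degrades gracefully.  MSI-X is tried first with one vector per
	 * TX queue, one per RX queue and one extra (which the MSI-X path
	 * dedicates to link-state interrupts); on failure the vectors are
	 * released and allocation is retried with a single MSI, and
	 * finally with a legacy INTx line.
	 */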
   1716 
   1717 	/*
   1718 	 * Check the function ID (unit number of the chip).
   1719 	 */
   1720 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1721 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1722 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1723 	    || (sc->sc_type == WM_T_82580)
   1724 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1725 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1726 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1727 	else
   1728 		sc->sc_funcid = 0;
   1729 
   1730 	/*
   1731 	 * Determine a few things about the bus we're connected to.
   1732 	 */
   1733 	if (sc->sc_type < WM_T_82543) {
   1734 		/* We don't really know the bus characteristics here. */
   1735 		sc->sc_bus_speed = 33;
   1736 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1737 		/*
    1738 		 * CSA (Communication Streaming Architecture) is about as fast
    1739 		 * as a 32-bit 66MHz PCI bus.
   1740 		 */
   1741 		sc->sc_flags |= WM_F_CSA;
   1742 		sc->sc_bus_speed = 66;
   1743 		aprint_verbose_dev(sc->sc_dev,
   1744 		    "Communication Streaming Architecture\n");
   1745 		if (sc->sc_type == WM_T_82547) {
   1746 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1747 			callout_setfunc(&sc->sc_txfifo_ch,
   1748 					wm_82547_txfifo_stall, sc);
   1749 			aprint_verbose_dev(sc->sc_dev,
   1750 			    "using 82547 Tx FIFO stall work-around\n");
   1751 		}
   1752 	} else if (sc->sc_type >= WM_T_82571) {
   1753 		sc->sc_flags |= WM_F_PCIE;
   1754 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1755 		    && (sc->sc_type != WM_T_ICH10)
   1756 		    && (sc->sc_type != WM_T_PCH)
   1757 		    && (sc->sc_type != WM_T_PCH2)
   1758 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1759 			/* ICH* and PCH* have no PCIe capability registers */
   1760 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1761 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1762 				NULL) == 0)
   1763 				aprint_error_dev(sc->sc_dev,
   1764 				    "unable to find PCIe capability\n");
   1765 		}
   1766 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1767 	} else {
   1768 		reg = CSR_READ(sc, WMREG_STATUS);
   1769 		if (reg & STATUS_BUS64)
   1770 			sc->sc_flags |= WM_F_BUS64;
   1771 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1772 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1773 
   1774 			sc->sc_flags |= WM_F_PCIX;
   1775 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1776 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1777 				aprint_error_dev(sc->sc_dev,
   1778 				    "unable to find PCIX capability\n");
   1779 			else if (sc->sc_type != WM_T_82545_3 &&
   1780 				 sc->sc_type != WM_T_82546_3) {
   1781 				/*
   1782 				 * Work around a problem caused by the BIOS
   1783 				 * setting the max memory read byte count
   1784 				 * incorrectly.
   1785 				 */
   1786 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1787 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1788 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1789 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1790 
   1791 				bytecnt =
   1792 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1793 				    PCIX_CMD_BYTECNT_SHIFT;
   1794 				maxb =
   1795 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1796 				    PCIX_STATUS_MAXB_SHIFT;
   1797 				if (bytecnt > maxb) {
   1798 					aprint_verbose_dev(sc->sc_dev,
   1799 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1800 					    512 << bytecnt, 512 << maxb);
   1801 					pcix_cmd = (pcix_cmd &
   1802 					    ~PCIX_CMD_BYTECNT_MASK) |
   1803 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1804 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1805 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1806 					    pcix_cmd);
   1807 				}
   1808 			}
   1809 		}
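		/*
		 * Worked example for the MMRBC fix above (illustrative
		 * only): both fields encode a byte count as 512 << n.
		 * If the BIOS left bytecnt == 3 (4096 bytes) but the
		 * device reports maxb == 1 (1024 bytes), the command
		 * register is rewritten so MMRBC drops to 1024.
		 */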
   1810 		/*
   1811 		 * The quad port adapter is special; it has a PCIX-PCIX
   1812 		 * bridge on the board, and can run the secondary bus at
   1813 		 * a higher speed.
   1814 		 */
   1815 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1816 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1817 								      : 66;
   1818 		} else if (sc->sc_flags & WM_F_PCIX) {
   1819 			switch (reg & STATUS_PCIXSPD_MASK) {
   1820 			case STATUS_PCIXSPD_50_66:
   1821 				sc->sc_bus_speed = 66;
   1822 				break;
   1823 			case STATUS_PCIXSPD_66_100:
   1824 				sc->sc_bus_speed = 100;
   1825 				break;
   1826 			case STATUS_PCIXSPD_100_133:
   1827 				sc->sc_bus_speed = 133;
   1828 				break;
   1829 			default:
   1830 				aprint_error_dev(sc->sc_dev,
   1831 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1832 				    reg & STATUS_PCIXSPD_MASK);
   1833 				sc->sc_bus_speed = 66;
   1834 				break;
   1835 			}
   1836 		} else
   1837 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1838 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1839 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1840 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1841 	}
   1842 
   1843 	/* clear interesting stat counters */
   1844 	CSR_READ(sc, WMREG_COLC);
   1845 	CSR_READ(sc, WMREG_RXERRC);
   1846 
    1847 	/* Switch PHY control from SMBus to PCIe */
   1848 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1849 	    || (sc->sc_type == WM_T_PCH_LPT))
   1850 		wm_smbustopci(sc);
   1851 
   1852 	/* Reset the chip to a known state. */
   1853 	wm_reset(sc);
   1854 
   1855 	/* Get some information about the EEPROM. */
   1856 	switch (sc->sc_type) {
   1857 	case WM_T_82542_2_0:
   1858 	case WM_T_82542_2_1:
   1859 	case WM_T_82543:
   1860 	case WM_T_82544:
   1861 		/* Microwire */
   1862 		sc->sc_nvm_wordsize = 64;
   1863 		sc->sc_nvm_addrbits = 6;
   1864 		break;
   1865 	case WM_T_82540:
   1866 	case WM_T_82545:
   1867 	case WM_T_82545_3:
   1868 	case WM_T_82546:
   1869 	case WM_T_82546_3:
   1870 		/* Microwire */
   1871 		reg = CSR_READ(sc, WMREG_EECD);
   1872 		if (reg & EECD_EE_SIZE) {
   1873 			sc->sc_nvm_wordsize = 256;
   1874 			sc->sc_nvm_addrbits = 8;
   1875 		} else {
   1876 			sc->sc_nvm_wordsize = 64;
   1877 			sc->sc_nvm_addrbits = 6;
   1878 		}
   1879 		sc->sc_flags |= WM_F_LOCK_EECD;
   1880 		break;
   1881 	case WM_T_82541:
   1882 	case WM_T_82541_2:
   1883 	case WM_T_82547:
   1884 	case WM_T_82547_2:
   1885 		sc->sc_flags |= WM_F_LOCK_EECD;
   1886 		reg = CSR_READ(sc, WMREG_EECD);
   1887 		if (reg & EECD_EE_TYPE) {
   1888 			/* SPI */
   1889 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1890 			wm_nvm_set_addrbits_size_eecd(sc);
   1891 		} else {
   1892 			/* Microwire */
   1893 			if ((reg & EECD_EE_ABITS) != 0) {
   1894 				sc->sc_nvm_wordsize = 256;
   1895 				sc->sc_nvm_addrbits = 8;
   1896 			} else {
   1897 				sc->sc_nvm_wordsize = 64;
   1898 				sc->sc_nvm_addrbits = 6;
   1899 			}
   1900 		}
   1901 		break;
   1902 	case WM_T_82571:
   1903 	case WM_T_82572:
   1904 		/* SPI */
   1905 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1906 		wm_nvm_set_addrbits_size_eecd(sc);
   1907 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1908 		break;
   1909 	case WM_T_82573:
   1910 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1911 		/* FALLTHROUGH */
   1912 	case WM_T_82574:
   1913 	case WM_T_82583:
   1914 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1915 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1916 			sc->sc_nvm_wordsize = 2048;
   1917 		} else {
   1918 			/* SPI */
   1919 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1920 			wm_nvm_set_addrbits_size_eecd(sc);
   1921 		}
   1922 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1923 		break;
   1924 	case WM_T_82575:
   1925 	case WM_T_82576:
   1926 	case WM_T_82580:
   1927 	case WM_T_I350:
   1928 	case WM_T_I354:
   1929 	case WM_T_80003:
   1930 		/* SPI */
   1931 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1932 		wm_nvm_set_addrbits_size_eecd(sc);
   1933 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1934 		    | WM_F_LOCK_SWSM;
   1935 		break;
   1936 	case WM_T_ICH8:
   1937 	case WM_T_ICH9:
   1938 	case WM_T_ICH10:
   1939 	case WM_T_PCH:
   1940 	case WM_T_PCH2:
   1941 	case WM_T_PCH_LPT:
   1942 		/* FLASH */
   1943 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1944 		sc->sc_nvm_wordsize = 2048;
   1945 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1946 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1947 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1948 			aprint_error_dev(sc->sc_dev,
   1949 			    "can't map FLASH registers\n");
   1950 			goto out;
   1951 		}
   1952 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1953 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1954 						ICH_FLASH_SECTOR_SIZE;
   1955 		sc->sc_ich8_flash_bank_size =
   1956 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1957 		sc->sc_ich8_flash_bank_size -=
   1958 		    (reg & ICH_GFPREG_BASE_MASK);
   1959 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1960 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
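		/*
		 * Worked example for the GFPREG decode above
		 * (illustrative only, assuming the usual 4KB
		 * ICH_FLASH_SECTOR_SIZE): base == 1 and limit == 2
		 * give (2 + 1 - 1) = 2 sectors, i.e. 8KB covering the
		 * two NVM banks; dividing by 2 * sizeof(uint16_t)
		 * leaves 2048 16-bit words per bank.
		 */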
   1961 		break;
   1962 	case WM_T_I210:
   1963 	case WM_T_I211:
   1964 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1965 			wm_nvm_set_addrbits_size_eecd(sc);
   1966 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1967 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1968 		} else {
   1969 			sc->sc_nvm_wordsize = INVM_SIZE;
   1970 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1971 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1972 		}
   1973 		break;
   1974 	default:
   1975 		break;
   1976 	}
   1977 
   1978 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1979 	switch (sc->sc_type) {
   1980 	case WM_T_82571:
   1981 	case WM_T_82572:
   1982 		reg = CSR_READ(sc, WMREG_SWSM2);
   1983 		if ((reg & SWSM2_LOCK) == 0) {
   1984 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1985 			force_clear_smbi = true;
   1986 		} else
   1987 			force_clear_smbi = false;
   1988 		break;
   1989 	case WM_T_82573:
   1990 	case WM_T_82574:
   1991 	case WM_T_82583:
   1992 		force_clear_smbi = true;
   1993 		break;
   1994 	default:
   1995 		force_clear_smbi = false;
   1996 		break;
   1997 	}
   1998 	if (force_clear_smbi) {
   1999 		reg = CSR_READ(sc, WMREG_SWSM);
   2000 		if ((reg & SWSM_SMBI) != 0)
   2001 			aprint_error_dev(sc->sc_dev,
   2002 			    "Please update the Bootagent\n");
   2003 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2004 	}
   2005 
   2006 	/*
    2007 	 * Defer printing the EEPROM type until after verifying the checksum.
   2008 	 * This allows the EEPROM type to be printed correctly in the case
   2009 	 * that no EEPROM is attached.
   2010 	 */
   2011 	/*
   2012 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2013 	 * this for later, so we can fail future reads from the EEPROM.
   2014 	 */
   2015 	if (wm_nvm_validate_checksum(sc)) {
   2016 		/*
    2017 		 * Check again, because some PCI-e parts fail the
    2018 		 * first check due to the link being in a sleep state.
   2019 		 */
   2020 		if (wm_nvm_validate_checksum(sc))
   2021 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2022 	}
   2023 
   2024 	/* Set device properties (macflags) */
   2025 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2026 
   2027 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2028 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2029 	else {
   2030 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2031 		    sc->sc_nvm_wordsize);
   2032 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2033 			aprint_verbose("iNVM");
   2034 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2035 			aprint_verbose("FLASH(HW)");
   2036 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2037 			aprint_verbose("FLASH");
   2038 		else {
   2039 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2040 				eetype = "SPI";
   2041 			else
   2042 				eetype = "MicroWire";
   2043 			aprint_verbose("(%d address bits) %s EEPROM",
   2044 			    sc->sc_nvm_addrbits, eetype);
   2045 		}
   2046 	}
   2047 	wm_nvm_version(sc);
   2048 	aprint_verbose("\n");
   2049 
   2050 	/* Check for I21[01] PLL workaround */
   2051 	if (sc->sc_type == WM_T_I210)
   2052 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2053 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2054 		/* NVM image release 3.25 has a workaround */
   2055 		if ((sc->sc_nvm_ver_major < 3)
   2056 		    || ((sc->sc_nvm_ver_major == 3)
   2057 			&& (sc->sc_nvm_ver_minor < 25))) {
   2058 			aprint_verbose_dev(sc->sc_dev,
   2059 			    "ROM image version %d.%d is older than 3.25\n",
   2060 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2061 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2062 		}
   2063 	}
   2064 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2065 		wm_pll_workaround_i210(sc);
   2066 
   2067 	switch (sc->sc_type) {
   2068 	case WM_T_82571:
   2069 	case WM_T_82572:
   2070 	case WM_T_82573:
   2071 	case WM_T_82574:
   2072 	case WM_T_82583:
   2073 	case WM_T_80003:
   2074 	case WM_T_ICH8:
   2075 	case WM_T_ICH9:
   2076 	case WM_T_ICH10:
   2077 	case WM_T_PCH:
   2078 	case WM_T_PCH2:
   2079 	case WM_T_PCH_LPT:
   2080 		if (wm_check_mng_mode(sc) != 0)
   2081 			wm_get_hw_control(sc);
   2082 		break;
   2083 	default:
   2084 		break;
   2085 	}
   2086 	wm_get_wakeup(sc);
   2087 	/*
    2088 	 * Read the Ethernet address from the EEPROM, unless it was
    2089 	 * already found in the device properties.
   2090 	 */
   2091 	ea = prop_dictionary_get(dict, "mac-address");
   2092 	if (ea != NULL) {
   2093 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2094 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2095 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2096 	} else {
   2097 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2098 			aprint_error_dev(sc->sc_dev,
   2099 			    "unable to read Ethernet address\n");
   2100 			goto out;
   2101 		}
   2102 	}
   2103 
   2104 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2105 	    ether_sprintf(enaddr));
   2106 
   2107 	/*
   2108 	 * Read the config info from the EEPROM, and set up various
   2109 	 * bits in the control registers based on their contents.
   2110 	 */
   2111 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2112 	if (pn != NULL) {
   2113 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2114 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2115 	} else {
   2116 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2117 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2118 			goto out;
   2119 		}
   2120 	}
   2121 
   2122 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2123 	if (pn != NULL) {
   2124 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2125 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2126 	} else {
   2127 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2128 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2129 			goto out;
   2130 		}
   2131 	}
   2132 
   2133 	/* check for WM_F_WOL */
   2134 	switch (sc->sc_type) {
   2135 	case WM_T_82542_2_0:
   2136 	case WM_T_82542_2_1:
   2137 	case WM_T_82543:
   2138 		/* dummy? */
   2139 		eeprom_data = 0;
   2140 		apme_mask = NVM_CFG3_APME;
   2141 		break;
   2142 	case WM_T_82544:
   2143 		apme_mask = NVM_CFG2_82544_APM_EN;
   2144 		eeprom_data = cfg2;
   2145 		break;
   2146 	case WM_T_82546:
   2147 	case WM_T_82546_3:
   2148 	case WM_T_82571:
   2149 	case WM_T_82572:
   2150 	case WM_T_82573:
   2151 	case WM_T_82574:
   2152 	case WM_T_82583:
   2153 	case WM_T_80003:
   2154 	default:
   2155 		apme_mask = NVM_CFG3_APME;
   2156 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2157 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2158 		break;
   2159 	case WM_T_82575:
   2160 	case WM_T_82576:
   2161 	case WM_T_82580:
   2162 	case WM_T_I350:
   2163 	case WM_T_I354: /* XXX ok? */
   2164 	case WM_T_ICH8:
   2165 	case WM_T_ICH9:
   2166 	case WM_T_ICH10:
   2167 	case WM_T_PCH:
   2168 	case WM_T_PCH2:
   2169 	case WM_T_PCH_LPT:
   2170 		/* XXX The funcid should be checked on some devices */
   2171 		apme_mask = WUC_APME;
   2172 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2173 		break;
   2174 	}
   2175 
   2176 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2177 	if ((eeprom_data & apme_mask) != 0)
   2178 		sc->sc_flags |= WM_F_WOL;
   2179 #ifdef WM_DEBUG
   2180 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2181 		printf("WOL\n");
   2182 #endif
   2183 
   2184 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2185 		/* Check NVM for autonegotiation */
   2186 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2187 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2188 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2189 		}
   2190 	}
   2191 
   2192 	/*
    2193 	 * XXX need special handling for some multiple-port cards
    2194 	 * to disable a particular port.
   2195 	 */
   2196 
   2197 	if (sc->sc_type >= WM_T_82544) {
   2198 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2199 		if (pn != NULL) {
   2200 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2201 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2202 		} else {
   2203 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2204 				aprint_error_dev(sc->sc_dev,
   2205 				    "unable to read SWDPIN\n");
   2206 				goto out;
   2207 			}
   2208 		}
   2209 	}
   2210 
   2211 	if (cfg1 & NVM_CFG1_ILOS)
   2212 		sc->sc_ctrl |= CTRL_ILOS;
   2213 
   2214 	/*
   2215 	 * XXX
    2216 	 * This code isn't correct because pins 2 and 3 are located
    2217 	 * in different positions on newer chips. Check all datasheets.
    2218 	 *
    2219 	 * Until this problem is resolved, only apply it to chips <= 82580.
   2220 	 */
   2221 	if (sc->sc_type <= WM_T_82580) {
   2222 		if (sc->sc_type >= WM_T_82544) {
   2223 			sc->sc_ctrl |=
   2224 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2225 			    CTRL_SWDPIO_SHIFT;
   2226 			sc->sc_ctrl |=
   2227 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2228 			    CTRL_SWDPINS_SHIFT;
   2229 		} else {
   2230 			sc->sc_ctrl |=
   2231 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2232 			    CTRL_SWDPIO_SHIFT;
   2233 		}
   2234 	}
   2235 
   2236 	/* XXX For other than 82580? */
   2237 	if (sc->sc_type == WM_T_82580) {
   2238 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2239 		printf("CFG3 = %08x\n", (uint32_t)nvmword);
   2240 		if (nvmword & __BIT(13)) {
   2241 			printf("SET ILOS\n");
   2242 			sc->sc_ctrl |= CTRL_ILOS;
   2243 		}
   2244 	}
   2245 
   2246 #if 0
   2247 	if (sc->sc_type >= WM_T_82544) {
   2248 		if (cfg1 & NVM_CFG1_IPS0)
   2249 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2250 		if (cfg1 & NVM_CFG1_IPS1)
   2251 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2252 		sc->sc_ctrl_ext |=
   2253 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2254 		    CTRL_EXT_SWDPIO_SHIFT;
   2255 		sc->sc_ctrl_ext |=
   2256 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2257 		    CTRL_EXT_SWDPINS_SHIFT;
   2258 	} else {
   2259 		sc->sc_ctrl_ext |=
   2260 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2261 		    CTRL_EXT_SWDPIO_SHIFT;
   2262 	}
   2263 #endif
   2264 
   2265 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2266 #if 0
   2267 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2268 #endif
   2269 
   2270 	if (sc->sc_type == WM_T_PCH) {
   2271 		uint16_t val;
   2272 
   2273 		/* Save the NVM K1 bit setting */
   2274 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2275 
   2276 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2277 			sc->sc_nvm_k1_enabled = 1;
   2278 		else
   2279 			sc->sc_nvm_k1_enabled = 0;
   2280 	}
   2281 
   2282 	/*
    2283 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2284 	 * media structures accordingly.
   2285 	 */
   2286 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2287 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2288 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2289 	    || sc->sc_type == WM_T_82573
   2290 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2291 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2292 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2293 	} else if (sc->sc_type < WM_T_82543 ||
   2294 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2295 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2296 			aprint_error_dev(sc->sc_dev,
   2297 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2298 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2299 		}
   2300 		wm_tbi_mediainit(sc);
   2301 	} else {
   2302 		switch (sc->sc_type) {
   2303 		case WM_T_82575:
   2304 		case WM_T_82576:
   2305 		case WM_T_82580:
   2306 		case WM_T_I350:
   2307 		case WM_T_I354:
   2308 		case WM_T_I210:
   2309 		case WM_T_I211:
   2310 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2311 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2312 			switch (link_mode) {
   2313 			case CTRL_EXT_LINK_MODE_1000KX:
   2314 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2315 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2316 				break;
   2317 			case CTRL_EXT_LINK_MODE_SGMII:
   2318 				if (wm_sgmii_uses_mdio(sc)) {
   2319 					aprint_verbose_dev(sc->sc_dev,
   2320 					    "SGMII(MDIO)\n");
   2321 					sc->sc_flags |= WM_F_SGMII;
   2322 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2323 					break;
   2324 				}
   2325 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2326 				/*FALLTHROUGH*/
   2327 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2328 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2329 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2330 					if (link_mode
   2331 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2332 						sc->sc_mediatype
   2333 						    = WM_MEDIATYPE_COPPER;
   2334 						sc->sc_flags |= WM_F_SGMII;
   2335 					} else {
   2336 						sc->sc_mediatype
   2337 						    = WM_MEDIATYPE_SERDES;
   2338 						aprint_verbose_dev(sc->sc_dev,
   2339 						    "SERDES\n");
   2340 					}
   2341 					break;
   2342 				}
   2343 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2344 					aprint_verbose_dev(sc->sc_dev,
   2345 					    "SERDES\n");
   2346 
   2347 				/* Change current link mode setting */
   2348 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2349 				switch (sc->sc_mediatype) {
   2350 				case WM_MEDIATYPE_COPPER:
   2351 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2352 					break;
   2353 				case WM_MEDIATYPE_SERDES:
   2354 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2355 					break;
   2356 				default:
   2357 					break;
   2358 				}
   2359 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2360 				break;
   2361 			case CTRL_EXT_LINK_MODE_GMII:
   2362 			default:
   2363 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2364 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2365 				break;
   2366 			}
   2367 
   2368 			reg &= ~CTRL_EXT_I2C_ENA;
   2369 			if ((sc->sc_flags & WM_F_SGMII) != 0)
   2370 				reg |= CTRL_EXT_I2C_ENA;
   2371 			else
   2372 				reg &= ~CTRL_EXT_I2C_ENA;
   2373 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2374 
   2375 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2376 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2377 			else
   2378 				wm_tbi_mediainit(sc);
   2379 			break;
   2380 		default:
   2381 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2382 				aprint_error_dev(sc->sc_dev,
   2383 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2384 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2385 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2386 		}
   2387 	}
   2388 
   2389 	ifp = &sc->sc_ethercom.ec_if;
   2390 	xname = device_xname(sc->sc_dev);
   2391 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2392 	ifp->if_softc = sc;
   2393 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2394 	ifp->if_ioctl = wm_ioctl;
   2395 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2396 		ifp->if_start = wm_nq_start;
   2397 	else
   2398 		ifp->if_start = wm_start;
   2399 	ifp->if_watchdog = wm_watchdog;
   2400 	ifp->if_init = wm_init;
   2401 	ifp->if_stop = wm_stop;
   2402 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2403 	IFQ_SET_READY(&ifp->if_snd);
   2404 
   2405 	/* Check for jumbo frame */
   2406 	switch (sc->sc_type) {
   2407 	case WM_T_82573:
   2408 		/* XXX limited to 9234 if ASPM is disabled */
   2409 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2410 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2411 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2412 		break;
   2413 	case WM_T_82571:
   2414 	case WM_T_82572:
   2415 	case WM_T_82574:
   2416 	case WM_T_82575:
   2417 	case WM_T_82576:
   2418 	case WM_T_82580:
   2419 	case WM_T_I350:
   2420 	case WM_T_I354: /* XXXX ok? */
   2421 	case WM_T_I210:
   2422 	case WM_T_I211:
   2423 	case WM_T_80003:
   2424 	case WM_T_ICH9:
   2425 	case WM_T_ICH10:
   2426 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2427 	case WM_T_PCH_LPT:
   2428 		/* XXX limited to 9234 */
   2429 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2430 		break;
   2431 	case WM_T_PCH:
   2432 		/* XXX limited to 4096 */
   2433 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2434 		break;
   2435 	case WM_T_82542_2_0:
   2436 	case WM_T_82542_2_1:
   2437 	case WM_T_82583:
   2438 	case WM_T_ICH8:
   2439 		/* No support for jumbo frame */
   2440 		break;
   2441 	default:
   2442 		/* ETHER_MAX_LEN_JUMBO */
   2443 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2444 		break;
   2445 	}
   2446 
   2447 	/* If we're a i82543 or greater, we can support VLANs. */
   2448 	if (sc->sc_type >= WM_T_82543)
   2449 		sc->sc_ethercom.ec_capabilities |=
   2450 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2451 
   2452 	/*
    2453 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2454 	 * on i82543 and later.
   2455 	 */
   2456 	if (sc->sc_type >= WM_T_82543) {
   2457 		ifp->if_capabilities |=
   2458 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2459 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2460 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2461 		    IFCAP_CSUM_TCPv6_Tx |
   2462 		    IFCAP_CSUM_UDPv6_Tx;
   2463 	}
   2464 
   2465 	/*
   2466 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2467 	 *
   2468 	 *	82541GI (8086:1076) ... no
   2469 	 *	82572EI (8086:10b9) ... yes
   2470 	 */
   2471 	if (sc->sc_type >= WM_T_82571) {
   2472 		ifp->if_capabilities |=
   2473 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2474 	}
   2475 
   2476 	/*
   2477 	 * If we're a i82544 or greater (except i82547), we can do
   2478 	 * TCP segmentation offload.
   2479 	 */
   2480 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2481 		ifp->if_capabilities |= IFCAP_TSOv4;
   2482 	}
   2483 
   2484 	if (sc->sc_type >= WM_T_82571) {
   2485 		ifp->if_capabilities |= IFCAP_TSOv6;
   2486 	}
   2487 
   2488 #ifdef WM_MPSAFE
   2489 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2490 #else
   2491 	sc->sc_core_lock = NULL;
   2492 #endif
   2493 
   2494 	/* Attach the interface. */
   2495 	if_attach(ifp);
   2496 	ether_ifattach(ifp, enaddr);
   2497 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2498 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2499 			  RND_FLAG_DEFAULT);
   2500 
   2501 #ifdef WM_EVENT_COUNTERS
   2502 	/* Attach event counters. */
   2503 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2504 	    NULL, xname, "txsstall");
   2505 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2506 	    NULL, xname, "txdstall");
   2507 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2508 	    NULL, xname, "txfifo_stall");
   2509 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2510 	    NULL, xname, "txdw");
   2511 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2512 	    NULL, xname, "txqe");
   2513 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2514 	    NULL, xname, "rxintr");
   2515 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2516 	    NULL, xname, "linkintr");
   2517 
   2518 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2519 	    NULL, xname, "rxipsum");
   2520 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2521 	    NULL, xname, "rxtusum");
   2522 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2523 	    NULL, xname, "txipsum");
   2524 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2525 	    NULL, xname, "txtusum");
   2526 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2527 	    NULL, xname, "txtusum6");
   2528 
   2529 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2530 	    NULL, xname, "txtso");
   2531 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2532 	    NULL, xname, "txtso6");
   2533 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2534 	    NULL, xname, "txtsopain");
   2535 
   2536 	for (i = 0; i < WM_NTXSEGS; i++) {
   2537 		snprintf(wm_txseg_evcnt_names[i],
   2538 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2539 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2540 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2541 	}
   2542 
   2543 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2544 	    NULL, xname, "txdrop");
   2545 
   2546 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2547 	    NULL, xname, "tu");
   2548 
   2549 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2550 	    NULL, xname, "tx_xoff");
   2551 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2552 	    NULL, xname, "tx_xon");
   2553 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2554 	    NULL, xname, "rx_xoff");
   2555 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2556 	    NULL, xname, "rx_xon");
   2557 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2558 	    NULL, xname, "rx_macctl");
   2559 #endif /* WM_EVENT_COUNTERS */
   2560 
   2561 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2562 		pmf_class_network_register(self, ifp);
   2563 	else
   2564 		aprint_error_dev(self, "couldn't establish power handler\n");
   2565 
   2566 	sc->sc_flags |= WM_F_ATTACHED;
   2567  out:
   2568 	return;
   2569 }
   2570 
   2571 /* The detach function (ca_detach) */
   2572 static int
   2573 wm_detach(device_t self, int flags __unused)
   2574 {
   2575 	struct wm_softc *sc = device_private(self);
   2576 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2577 	int i;
   2578 #ifndef WM_MPSAFE
   2579 	int s;
   2580 #endif
   2581 
   2582 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2583 		return 0;
   2584 
   2585 #ifndef WM_MPSAFE
   2586 	s = splnet();
   2587 #endif
   2588 	/* Stop the interface. Callouts are stopped in it. */
   2589 	wm_stop(ifp, 1);
   2590 
   2591 #ifndef WM_MPSAFE
   2592 	splx(s);
   2593 #endif
   2594 
   2595 	pmf_device_deregister(self);
   2596 
   2597 	/* Tell the firmware about the release */
   2598 	WM_CORE_LOCK(sc);
   2599 	wm_release_manageability(sc);
   2600 	wm_release_hw_control(sc);
   2601 	WM_CORE_UNLOCK(sc);
   2602 
   2603 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2604 
   2605 	/* Delete all remaining media. */
   2606 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2607 
   2608 	ether_ifdetach(ifp);
   2609 	if_detach(ifp);
   2610 
   2611 
   2612 	/* Unload RX dmamaps and free mbufs */
   2613 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   2614 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   2615 		WM_RX_LOCK(rxq);
   2616 		wm_rxdrain(rxq);
   2617 		WM_RX_UNLOCK(rxq);
   2618 	}
   2619 	/* Must unlock here */
   2620 
   2621 	wm_free_txrx_queues(sc);
   2622 
   2623 	/* Disestablish the interrupt handler */
   2624 	for (i = 0; i < sc->sc_nintrs; i++) {
   2625 		if (sc->sc_ihs[i] != NULL) {
   2626 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2627 			sc->sc_ihs[i] = NULL;
   2628 		}
   2629 	}
   2630 #ifdef WM_MSI_MSIX
   2631 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2632 #endif /* WM_MSI_MSIX */
   2633 
   2634 	/* Unmap the registers */
   2635 	if (sc->sc_ss) {
   2636 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2637 		sc->sc_ss = 0;
   2638 	}
   2639 	if (sc->sc_ios) {
   2640 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2641 		sc->sc_ios = 0;
   2642 	}
   2643 	if (sc->sc_flashs) {
   2644 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2645 		sc->sc_flashs = 0;
   2646 	}
   2647 
   2648 	if (sc->sc_core_lock)
   2649 		mutex_obj_free(sc->sc_core_lock);
   2650 
   2651 	return 0;
   2652 }
   2653 
   2654 static bool
   2655 wm_suspend(device_t self, const pmf_qual_t *qual)
   2656 {
   2657 	struct wm_softc *sc = device_private(self);
   2658 
   2659 	wm_release_manageability(sc);
   2660 	wm_release_hw_control(sc);
   2661 #ifdef WM_WOL
   2662 	wm_enable_wakeup(sc);
   2663 #endif
   2664 
   2665 	return true;
   2666 }
   2667 
   2668 static bool
   2669 wm_resume(device_t self, const pmf_qual_t *qual)
   2670 {
   2671 	struct wm_softc *sc = device_private(self);
   2672 
   2673 	wm_init_manageability(sc);
   2674 
   2675 	return true;
   2676 }
   2677 
   2678 /*
   2679  * wm_watchdog:		[ifnet interface function]
   2680  *
   2681  *	Watchdog timer handler.
   2682  */
   2683 static void
   2684 wm_watchdog(struct ifnet *ifp)
   2685 {
   2686 	struct wm_softc *sc = ifp->if_softc;
   2687 	struct wm_txqueue *txq = &sc->sc_txq[0];
   2688 
   2689 	/*
   2690 	 * Since we're using delayed interrupts, sweep up
   2691 	 * before we report an error.
   2692 	 */
   2693 	WM_TX_LOCK(txq);
   2694 	wm_txeof(sc);
   2695 	WM_TX_UNLOCK(txq);
   2696 
   2697 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2698 #ifdef WM_DEBUG
   2699 		int i, j;
   2700 		struct wm_txsoft *txs;
   2701 #endif
   2702 		log(LOG_ERR,
   2703 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2704 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2705 		    txq->txq_next);
   2706 		ifp->if_oerrors++;
   2707 #ifdef WM_DEBUG
    2708 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2709 		    i = WM_NEXTTXS(txq, i)) {
    2710 			txs = &txq->txq_soft[i];
    2711 			printf("txs %d tx %d -> %d\n",
    2712 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2713 			for (j = txs->txs_firstdesc; ;
    2714 			    j = WM_NEXTTX(txq, j)) {
    2715 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2716 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2717 				printf("\t %#08x%08x\n",
    2718 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2719 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2720 				if (j == txs->txs_lastdesc)
    2721 					break;
    2722 			}
    2723 		}
   2724 #endif
   2725 		/* Reset the interface. */
   2726 		(void) wm_init(ifp);
   2727 	}
   2728 
   2729 	/* Try to get more packets going. */
   2730 	ifp->if_start(ifp);
   2731 }
   2732 
   2733 /*
   2734  * wm_tick:
   2735  *
   2736  *	One second timer, used to check link status, sweep up
   2737  *	completed transmit jobs, etc.
   2738  */
   2739 static void
   2740 wm_tick(void *arg)
   2741 {
   2742 	struct wm_softc *sc = arg;
   2743 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2744 #ifndef WM_MPSAFE
   2745 	int s;
   2746 
   2747 	s = splnet();
   2748 #endif
   2749 
   2750 	WM_CORE_LOCK(sc);
   2751 
   2752 	if (sc->sc_stopping)
   2753 		goto out;
   2754 
   2755 	if (sc->sc_type >= WM_T_82542_2_1) {
   2756 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2757 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2758 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2759 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2760 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2761 	}
   2762 
   2763 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2764 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2765 	    + CSR_READ(sc, WMREG_CRCERRS)
   2766 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2767 	    + CSR_READ(sc, WMREG_SYMERRC)
   2768 	    + CSR_READ(sc, WMREG_RXERRC)
   2769 	    + CSR_READ(sc, WMREG_SEC)
   2770 	    + CSR_READ(sc, WMREG_CEXTERR)
   2771 	    + CSR_READ(sc, WMREG_RLEC);
   2772 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2773 
   2774 	if (sc->sc_flags & WM_F_HAS_MII)
   2775 		mii_tick(&sc->sc_mii);
   2776 	else if ((sc->sc_type >= WM_T_82575)
   2777 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2778 		wm_serdes_tick(sc);
   2779 	else
   2780 		wm_tbi_tick(sc);
   2781 
   2782 out:
   2783 	WM_CORE_UNLOCK(sc);
   2784 #ifndef WM_MPSAFE
   2785 	splx(s);
   2786 #endif
   2787 
   2788 	if (!sc->sc_stopping)
   2789 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2790 }
   2791 
   2792 static int
   2793 wm_ifflags_cb(struct ethercom *ec)
   2794 {
   2795 	struct ifnet *ifp = &ec->ec_if;
   2796 	struct wm_softc *sc = ifp->if_softc;
   2797 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2798 	int rc = 0;
   2799 
   2800 	WM_CORE_LOCK(sc);
   2801 
   2802 	if (change != 0)
   2803 		sc->sc_if_flags = ifp->if_flags;
   2804 
   2805 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2806 		rc = ENETRESET;
   2807 		goto out;
   2808 	}
   2809 
   2810 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2811 		wm_set_filter(sc);
   2812 
   2813 	wm_set_vlan(sc);
   2814 
   2815 out:
   2816 	WM_CORE_UNLOCK(sc);
   2817 
   2818 	return rc;
   2819 }
   2820 
   2821 /*
   2822  * wm_ioctl:		[ifnet interface function]
   2823  *
   2824  *	Handle control requests from the operator.
   2825  */
   2826 static int
   2827 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2828 {
   2829 	struct wm_softc *sc = ifp->if_softc;
   2830 	struct ifreq *ifr = (struct ifreq *) data;
   2831 	struct ifaddr *ifa = (struct ifaddr *)data;
   2832 	struct sockaddr_dl *sdl;
   2833 	int s, error;
   2834 
   2835 #ifndef WM_MPSAFE
   2836 	s = splnet();
   2837 #endif
   2838 	switch (cmd) {
   2839 	case SIOCSIFMEDIA:
   2840 	case SIOCGIFMEDIA:
   2841 		WM_CORE_LOCK(sc);
   2842 		/* Flow control requires full-duplex mode. */
   2843 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2844 		    (ifr->ifr_media & IFM_FDX) == 0)
   2845 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2846 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2847 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2848 				/* We can do both TXPAUSE and RXPAUSE. */
   2849 				ifr->ifr_media |=
   2850 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2851 			}
   2852 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2853 		}
   2854 		WM_CORE_UNLOCK(sc);
   2855 #ifdef WM_MPSAFE
   2856 		s = splnet();
   2857 #endif
   2858 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2859 #ifdef WM_MPSAFE
   2860 		splx(s);
   2861 #endif
   2862 		break;
   2863 	case SIOCINITIFADDR:
   2864 		WM_CORE_LOCK(sc);
   2865 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2866 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2867 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2868 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2869 			/* unicast address is first multicast entry */
   2870 			wm_set_filter(sc);
   2871 			error = 0;
   2872 			WM_CORE_UNLOCK(sc);
   2873 			break;
   2874 		}
   2875 		WM_CORE_UNLOCK(sc);
   2876 		/*FALLTHROUGH*/
   2877 	default:
   2878 #ifdef WM_MPSAFE
   2879 		s = splnet();
   2880 #endif
   2881 		/* It may call wm_start, so unlock here */
   2882 		error = ether_ioctl(ifp, cmd, data);
   2883 #ifdef WM_MPSAFE
   2884 		splx(s);
   2885 #endif
   2886 		if (error != ENETRESET)
   2887 			break;
   2888 
   2889 		error = 0;
   2890 
   2891 		if (cmd == SIOCSIFCAP) {
   2892 			error = (*ifp->if_init)(ifp);
   2893 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2894 			;
   2895 		else if (ifp->if_flags & IFF_RUNNING) {
   2896 			/*
   2897 			 * Multicast list has changed; set the hardware filter
   2898 			 * accordingly.
   2899 			 */
   2900 			WM_CORE_LOCK(sc);
   2901 			wm_set_filter(sc);
   2902 			WM_CORE_UNLOCK(sc);
   2903 		}
   2904 		break;
   2905 	}
   2906 
   2907 #ifndef WM_MPSAFE
   2908 	splx(s);
   2909 #endif
   2910 	return error;
   2911 }
   2912 
   2913 /* MAC address related */
   2914 
   2915 /*
    2916  * Get the offset of the MAC address and return it.
    2917  * If an error occurred, use offset 0.
   2918  */
   2919 static uint16_t
   2920 wm_check_alt_mac_addr(struct wm_softc *sc)
   2921 {
   2922 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2923 	uint16_t offset = NVM_OFF_MACADDR;
   2924 
   2925 	/* Try to read alternative MAC address pointer */
   2926 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2927 		return 0;
   2928 
2929 	/* Check whether the pointer is valid. */
   2930 	if ((offset == 0x0000) || (offset == 0xffff))
   2931 		return 0;
   2932 
   2933 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2934 	/*
2935 	 * Check whether the alternative MAC address is valid.  Some cards
2936 	 * have a non-0xffff pointer but don't actually use an alternative
2937 	 * MAC address.
2938 	 *
2939 	 * A valid address is unicast: its multicast (I/G) bit is clear.
   2940 	 */
   2941 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2942 		if (((myea[0] & 0xff) & 0x01) == 0)
   2943 			return offset; /* Found */
   2944 
   2945 	/* Not found */
   2946 	return 0;
   2947 }
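/*
 * Illustrative example (not driver code): the unicast check above tests
 * the I/G bit, bit 0 of the first octet of the MAC address.  The first
 * NVM word holds octets 0 and 1 with octet 0 in the low byte, so
 * myea[0] & 0xff is the first octet:
 *
 *	00:1b:21:xx:xx:xx -> bit 0 clear, unicast, use the offset
 *	01:00:5e:xx:xx:xx -> bit 0 set, group address, not a valid entry
 */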
   2948 
   2949 static int
   2950 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2951 {
   2952 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2953 	uint16_t offset = NVM_OFF_MACADDR;
   2954 	int do_invert = 0;
   2955 
   2956 	switch (sc->sc_type) {
   2957 	case WM_T_82580:
   2958 	case WM_T_I350:
   2959 	case WM_T_I354:
   2960 		/* EEPROM Top Level Partitioning */
   2961 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2962 		break;
   2963 	case WM_T_82571:
   2964 	case WM_T_82575:
   2965 	case WM_T_82576:
   2966 	case WM_T_80003:
   2967 	case WM_T_I210:
   2968 	case WM_T_I211:
   2969 		offset = wm_check_alt_mac_addr(sc);
   2970 		if (offset == 0)
   2971 			if ((sc->sc_funcid & 0x01) == 1)
   2972 				do_invert = 1;
   2973 		break;
   2974 	default:
   2975 		if ((sc->sc_funcid & 0x01) == 1)
   2976 			do_invert = 1;
   2977 		break;
   2978 	}
   2979 
   2980 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2981 		myea) != 0)
   2982 		goto bad;
   2983 
   2984 	enaddr[0] = myea[0] & 0xff;
   2985 	enaddr[1] = myea[0] >> 8;
   2986 	enaddr[2] = myea[1] & 0xff;
   2987 	enaddr[3] = myea[1] >> 8;
   2988 	enaddr[4] = myea[2] & 0xff;
   2989 	enaddr[5] = myea[2] >> 8;
   2990 
   2991 	/*
   2992 	 * Toggle the LSB of the MAC address on the second port
   2993 	 * of some dual port cards.
   2994 	 */
   2995 	if (do_invert != 0)
   2996 		enaddr[5] ^= 1;
   2997 
   2998 	return 0;
   2999 
   3000  bad:
   3001 	return -1;
   3002 }
   3003 
   3004 /*
   3005  * wm_set_ral:
   3006  *
3007  *	Set an entry in the receive address list.
   3008  */
   3009 static void
   3010 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3011 {
   3012 	uint32_t ral_lo, ral_hi;
   3013 
   3014 	if (enaddr != NULL) {
   3015 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3016 		    (enaddr[3] << 24);
   3017 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3018 		ral_hi |= RAL_AV;
   3019 	} else {
   3020 		ral_lo = 0;
   3021 		ral_hi = 0;
   3022 	}
   3023 
   3024 	if (sc->sc_type >= WM_T_82544) {
   3025 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3026 		    ral_lo);
   3027 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3028 		    ral_hi);
   3029 	} else {
   3030 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3031 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3032 	}
   3033 }
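/*
 * Worked example for wm_set_ral() (illustrative values): the station
 * address 00:11:22:33:44:55 is packed little-endian into the receive
 * address registers as
 *
 *	ral_lo = 0x33221100			(octets 0-3)
 *	ral_hi = 0x00005544 | RAL_AV		(octets 4-5 + Address Valid)
 */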
   3034 
   3035 /*
   3036  * wm_mchash:
   3037  *
   3038  *	Compute the hash of the multicast address for the 4096-bit
   3039  *	multicast filter.
   3040  */
   3041 static uint32_t
   3042 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3043 {
   3044 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3045 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3046 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3047 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3048 	uint32_t hash;
   3049 
   3050 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3051 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3052 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   3053 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3054 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3055 		return (hash & 0x3ff);
   3056 	}
   3057 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3058 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3059 
   3060 	return (hash & 0xfff);
   3061 }
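/*
 * Worked example for wm_mchash() (illustrative values): the IPv4
 * all-hosts group 01:00:5e:00:00:01 on a non-ICH/PCH chip with
 * sc_mchash_type == 0 (lo_shift 4, hi_shift 4) gives
 *
 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010	(12-bit value)
 *
 * wm_set_filter() below then uses hash >> 5 (0) as the MTA register
 * index and hash & 0x1f (0x10) as the bit within that register.
 */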
   3062 
   3063 /*
   3064  * wm_set_filter:
   3065  *
   3066  *	Set up the receive filter.
   3067  */
   3068 static void
   3069 wm_set_filter(struct wm_softc *sc)
   3070 {
   3071 	struct ethercom *ec = &sc->sc_ethercom;
   3072 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3073 	struct ether_multi *enm;
   3074 	struct ether_multistep step;
   3075 	bus_addr_t mta_reg;
   3076 	uint32_t hash, reg, bit;
   3077 	int i, size;
   3078 
   3079 	if (sc->sc_type >= WM_T_82544)
   3080 		mta_reg = WMREG_CORDOVA_MTA;
   3081 	else
   3082 		mta_reg = WMREG_MTA;
   3083 
   3084 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3085 
   3086 	if (ifp->if_flags & IFF_BROADCAST)
   3087 		sc->sc_rctl |= RCTL_BAM;
   3088 	if (ifp->if_flags & IFF_PROMISC) {
   3089 		sc->sc_rctl |= RCTL_UPE;
   3090 		goto allmulti;
   3091 	}
   3092 
   3093 	/*
   3094 	 * Set the station address in the first RAL slot, and
   3095 	 * clear the remaining slots.
   3096 	 */
   3097 	if (sc->sc_type == WM_T_ICH8)
3098 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3099 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3100 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3101 	    || (sc->sc_type == WM_T_PCH_LPT))
   3102 		size = WM_RAL_TABSIZE_ICH8;
   3103 	else if (sc->sc_type == WM_T_82575)
   3104 		size = WM_RAL_TABSIZE_82575;
   3105 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3106 		size = WM_RAL_TABSIZE_82576;
   3107 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3108 		size = WM_RAL_TABSIZE_I350;
   3109 	else
   3110 		size = WM_RAL_TABSIZE;
   3111 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3112 	for (i = 1; i < size; i++)
   3113 		wm_set_ral(sc, NULL, i);
   3114 
   3115 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3116 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3117 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   3118 		size = WM_ICH8_MC_TABSIZE;
   3119 	else
   3120 		size = WM_MC_TABSIZE;
   3121 	/* Clear out the multicast table. */
   3122 	for (i = 0; i < size; i++)
   3123 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3124 
   3125 	ETHER_FIRST_MULTI(step, ec, enm);
   3126 	while (enm != NULL) {
   3127 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3128 			/*
   3129 			 * We must listen to a range of multicast addresses.
   3130 			 * For now, just accept all multicasts, rather than
   3131 			 * trying to set only those filter bits needed to match
   3132 			 * the range.  (At this time, the only use of address
   3133 			 * ranges is for IP multicast routing, for which the
   3134 			 * range is big enough to require all bits set.)
   3135 			 */
   3136 			goto allmulti;
   3137 		}
   3138 
   3139 		hash = wm_mchash(sc, enm->enm_addrlo);
   3140 
   3141 		reg = (hash >> 5);
   3142 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3143 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3144 		    || (sc->sc_type == WM_T_PCH2)
   3145 		    || (sc->sc_type == WM_T_PCH_LPT))
   3146 			reg &= 0x1f;
   3147 		else
   3148 			reg &= 0x7f;
   3149 		bit = hash & 0x1f;
   3150 
   3151 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3152 		hash |= 1U << bit;
   3153 
   3154 		/* XXX Hardware bug?? */
   3155 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
   3156 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3157 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3158 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3159 		} else
   3160 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3161 
   3162 		ETHER_NEXT_MULTI(step, enm);
   3163 	}
   3164 
   3165 	ifp->if_flags &= ~IFF_ALLMULTI;
   3166 	goto setit;
   3167 
   3168  allmulti:
   3169 	ifp->if_flags |= IFF_ALLMULTI;
   3170 	sc->sc_rctl |= RCTL_MPE;
   3171 
   3172  setit:
   3173 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3174 }
   3175 
   3176 /* Reset and init related */
   3177 
   3178 static void
   3179 wm_set_vlan(struct wm_softc *sc)
   3180 {
   3181 	/* Deal with VLAN enables. */
   3182 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3183 		sc->sc_ctrl |= CTRL_VME;
   3184 	else
   3185 		sc->sc_ctrl &= ~CTRL_VME;
   3186 
   3187 	/* Write the control registers. */
   3188 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3189 }
   3190 
   3191 static void
   3192 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3193 {
   3194 	uint32_t gcr;
   3195 	pcireg_t ctrl2;
   3196 
   3197 	gcr = CSR_READ(sc, WMREG_GCR);
   3198 
3199 	/* Only take action if the timeout value is at its default of 0 */
   3200 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3201 		goto out;
   3202 
   3203 	if ((gcr & GCR_CAP_VER2) == 0) {
   3204 		gcr |= GCR_CMPL_TMOUT_10MS;
   3205 		goto out;
   3206 	}
   3207 
   3208 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3209 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3210 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3211 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3212 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3213 
   3214 out:
   3215 	/* Disable completion timeout resend */
   3216 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3217 
   3218 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3219 }
   3220 
   3221 void
   3222 wm_get_auto_rd_done(struct wm_softc *sc)
   3223 {
   3224 	int i;
   3225 
   3226 	/* wait for eeprom to reload */
   3227 	switch (sc->sc_type) {
   3228 	case WM_T_82571:
   3229 	case WM_T_82572:
   3230 	case WM_T_82573:
   3231 	case WM_T_82574:
   3232 	case WM_T_82583:
   3233 	case WM_T_82575:
   3234 	case WM_T_82576:
   3235 	case WM_T_82580:
   3236 	case WM_T_I350:
   3237 	case WM_T_I354:
   3238 	case WM_T_I210:
   3239 	case WM_T_I211:
   3240 	case WM_T_80003:
   3241 	case WM_T_ICH8:
   3242 	case WM_T_ICH9:
   3243 		for (i = 0; i < 10; i++) {
   3244 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3245 				break;
   3246 			delay(1000);
   3247 		}
   3248 		if (i == 10) {
   3249 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3250 			    "complete\n", device_xname(sc->sc_dev));
   3251 		}
   3252 		break;
   3253 	default:
   3254 		break;
   3255 	}
   3256 }
   3257 
   3258 void
   3259 wm_lan_init_done(struct wm_softc *sc)
   3260 {
   3261 	uint32_t reg = 0;
   3262 	int i;
   3263 
   3264 	/* wait for eeprom to reload */
   3265 	switch (sc->sc_type) {
   3266 	case WM_T_ICH10:
   3267 	case WM_T_PCH:
   3268 	case WM_T_PCH2:
   3269 	case WM_T_PCH_LPT:
   3270 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3271 			reg = CSR_READ(sc, WMREG_STATUS);
   3272 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3273 				break;
   3274 			delay(100);
   3275 		}
   3276 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3277 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3278 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3279 		}
   3280 		break;
   3281 	default:
   3282 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3283 		    __func__);
   3284 		break;
   3285 	}
   3286 
   3287 	reg &= ~STATUS_LAN_INIT_DONE;
   3288 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3289 }
   3290 
   3291 void
   3292 wm_get_cfg_done(struct wm_softc *sc)
   3293 {
   3294 	int mask;
   3295 	uint32_t reg;
   3296 	int i;
   3297 
   3298 	/* wait for eeprom to reload */
   3299 	switch (sc->sc_type) {
   3300 	case WM_T_82542_2_0:
   3301 	case WM_T_82542_2_1:
   3302 		/* null */
   3303 		break;
   3304 	case WM_T_82543:
   3305 	case WM_T_82544:
   3306 	case WM_T_82540:
   3307 	case WM_T_82545:
   3308 	case WM_T_82545_3:
   3309 	case WM_T_82546:
   3310 	case WM_T_82546_3:
   3311 	case WM_T_82541:
   3312 	case WM_T_82541_2:
   3313 	case WM_T_82547:
   3314 	case WM_T_82547_2:
   3315 	case WM_T_82573:
   3316 	case WM_T_82574:
   3317 	case WM_T_82583:
   3318 		/* generic */
   3319 		delay(10*1000);
   3320 		break;
   3321 	case WM_T_80003:
   3322 	case WM_T_82571:
   3323 	case WM_T_82572:
   3324 	case WM_T_82575:
   3325 	case WM_T_82576:
   3326 	case WM_T_82580:
   3327 	case WM_T_I350:
   3328 	case WM_T_I354:
   3329 	case WM_T_I210:
   3330 	case WM_T_I211:
   3331 		if (sc->sc_type == WM_T_82571) {
3332 			/* On the 82571, all functions share CFGDONE_0 */
   3333 			mask = EEMNGCTL_CFGDONE_0;
   3334 		} else
   3335 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3336 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3337 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3338 				break;
   3339 			delay(1000);
   3340 		}
   3341 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3342 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3343 				device_xname(sc->sc_dev), __func__));
   3344 		}
   3345 		break;
   3346 	case WM_T_ICH8:
   3347 	case WM_T_ICH9:
   3348 	case WM_T_ICH10:
   3349 	case WM_T_PCH:
   3350 	case WM_T_PCH2:
   3351 	case WM_T_PCH_LPT:
   3352 		delay(10*1000);
   3353 		if (sc->sc_type >= WM_T_ICH10)
   3354 			wm_lan_init_done(sc);
   3355 		else
   3356 			wm_get_auto_rd_done(sc);
   3357 
   3358 		reg = CSR_READ(sc, WMREG_STATUS);
   3359 		if ((reg & STATUS_PHYRA) != 0)
   3360 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3361 		break;
   3362 	default:
   3363 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3364 		    __func__);
   3365 		break;
   3366 	}
   3367 }
   3368 
   3369 /* Init hardware bits */
   3370 void
   3371 wm_initialize_hardware_bits(struct wm_softc *sc)
   3372 {
   3373 	uint32_t tarc0, tarc1, reg;
   3374 
   3375 	/* For 82571 variant, 80003 and ICHs */
   3376 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3377 	    || (sc->sc_type >= WM_T_80003)) {
   3378 
   3379 		/* Transmit Descriptor Control 0 */
   3380 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3381 		reg |= TXDCTL_COUNT_DESC;
   3382 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3383 
   3384 		/* Transmit Descriptor Control 1 */
   3385 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3386 		reg |= TXDCTL_COUNT_DESC;
   3387 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3388 
   3389 		/* TARC0 */
   3390 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3391 		switch (sc->sc_type) {
   3392 		case WM_T_82571:
   3393 		case WM_T_82572:
   3394 		case WM_T_82573:
   3395 		case WM_T_82574:
   3396 		case WM_T_82583:
   3397 		case WM_T_80003:
   3398 			/* Clear bits 30..27 */
   3399 			tarc0 &= ~__BITS(30, 27);
   3400 			break;
   3401 		default:
   3402 			break;
   3403 		}
   3404 
   3405 		switch (sc->sc_type) {
   3406 		case WM_T_82571:
   3407 		case WM_T_82572:
   3408 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3409 
   3410 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3411 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3412 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3413 			/* 8257[12] Errata No.7 */
3414 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3415 
   3416 			/* TARC1 bit 28 */
   3417 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3418 				tarc1 &= ~__BIT(28);
   3419 			else
   3420 				tarc1 |= __BIT(28);
   3421 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3422 
   3423 			/*
   3424 			 * 8257[12] Errata No.13
3425 			 * Disable Dynamic Clock Gating.
   3426 			 */
   3427 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3428 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3429 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3430 			break;
   3431 		case WM_T_82573:
   3432 		case WM_T_82574:
   3433 		case WM_T_82583:
   3434 			if ((sc->sc_type == WM_T_82574)
   3435 			    || (sc->sc_type == WM_T_82583))
   3436 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3437 
   3438 			/* Extended Device Control */
   3439 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3440 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3441 			reg |= __BIT(22);	/* Set bit 22 */
   3442 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3443 
   3444 			/* Device Control */
   3445 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3446 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3447 
   3448 			/* PCIe Control Register */
   3449 			/*
   3450 			 * 82573 Errata (unknown).
   3451 			 *
   3452 			 * 82574 Errata 25 and 82583 Errata 12
   3453 			 * "Dropped Rx Packets":
3454 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3455 			 */
   3456 			reg = CSR_READ(sc, WMREG_GCR);
   3457 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3458 			CSR_WRITE(sc, WMREG_GCR, reg);
   3459 
   3460 			if ((sc->sc_type == WM_T_82574)
   3461 			    || (sc->sc_type == WM_T_82583)) {
   3462 				/*
   3463 				 * Document says this bit must be set for
   3464 				 * proper operation.
   3465 				 */
   3466 				reg = CSR_READ(sc, WMREG_GCR);
   3467 				reg |= __BIT(22);
   3468 				CSR_WRITE(sc, WMREG_GCR, reg);
   3469 
   3470 				/*
3471 				 * Apply a workaround for the hardware erratum
3472 				 * documented in the errata docs: it fixes an
3473 				 * issue where error-prone or unreliable PCIe
3474 				 * completions occur, particularly with ASPM
3475 				 * enabled. Without the fix, the issue can
3476 				 * cause Tx timeouts.
   3477 				 */
   3478 				reg = CSR_READ(sc, WMREG_GCR2);
   3479 				reg |= __BIT(0);
   3480 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3481 			}
   3482 			break;
   3483 		case WM_T_80003:
   3484 			/* TARC0 */
   3485 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3486 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3487 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3488 
   3489 			/* TARC1 bit 28 */
   3490 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3491 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3492 				tarc1 &= ~__BIT(28);
   3493 			else
   3494 				tarc1 |= __BIT(28);
   3495 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3496 			break;
   3497 		case WM_T_ICH8:
   3498 		case WM_T_ICH9:
   3499 		case WM_T_ICH10:
   3500 		case WM_T_PCH:
   3501 		case WM_T_PCH2:
   3502 		case WM_T_PCH_LPT:
   3503 			/* TARC 0 */
   3504 			if (sc->sc_type == WM_T_ICH8) {
   3505 				/* Set TARC0 bits 29 and 28 */
   3506 				tarc0 |= __BITS(29, 28);
   3507 			}
   3508 			/* Set TARC0 bits 23,24,26,27 */
   3509 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3510 
   3511 			/* CTRL_EXT */
   3512 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3513 			reg |= __BIT(22);	/* Set bit 22 */
   3514 			/*
   3515 			 * Enable PHY low-power state when MAC is at D3
   3516 			 * w/o WoL
   3517 			 */
   3518 			if (sc->sc_type >= WM_T_PCH)
   3519 				reg |= CTRL_EXT_PHYPDEN;
   3520 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3521 
   3522 			/* TARC1 */
   3523 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3524 			/* bit 28 */
   3525 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3526 				tarc1 &= ~__BIT(28);
   3527 			else
   3528 				tarc1 |= __BIT(28);
   3529 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3530 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3531 
   3532 			/* Device Status */
   3533 			if (sc->sc_type == WM_T_ICH8) {
   3534 				reg = CSR_READ(sc, WMREG_STATUS);
   3535 				reg &= ~__BIT(31);
   3536 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3537 
   3538 			}
   3539 
   3540 			/*
   3541 			 * Work-around descriptor data corruption issue during
   3542 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3543 			 * capability.
   3544 			 */
   3545 			reg = CSR_READ(sc, WMREG_RFCTL);
   3546 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3547 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3548 			break;
   3549 		default:
   3550 			break;
   3551 		}
   3552 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3553 
   3554 		/*
   3555 		 * 8257[12] Errata No.52 and some others.
   3556 		 * Avoid RSS Hash Value bug.
   3557 		 */
   3558 		switch (sc->sc_type) {
   3559 		case WM_T_82571:
   3560 		case WM_T_82572:
   3561 		case WM_T_82573:
   3562 		case WM_T_80003:
   3563 		case WM_T_ICH8:
   3564 			reg = CSR_READ(sc, WMREG_RFCTL);
3565 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3566 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3567 			break;
   3568 		default:
   3569 			break;
   3570 		}
   3571 	}
   3572 }
   3573 
   3574 static uint32_t
   3575 wm_rxpbs_adjust_82580(uint32_t val)
   3576 {
   3577 	uint32_t rv = 0;
   3578 
   3579 	if (val < __arraycount(wm_82580_rxpbs_table))
   3580 		rv = wm_82580_rxpbs_table[val];
   3581 
   3582 	return rv;
   3583 }
   3584 
   3585 /*
   3586  * wm_reset:
   3587  *
   3588  *	Reset the i82542 chip.
   3589  */
   3590 static void
   3591 wm_reset(struct wm_softc *sc)
   3592 {
   3593 	int phy_reset = 0;
   3594 	int i, error = 0;
   3595 	uint32_t reg, mask;
   3596 
   3597 	/*
   3598 	 * Allocate on-chip memory according to the MTU size.
   3599 	 * The Packet Buffer Allocation register must be written
   3600 	 * before the chip is reset.
   3601 	 */
   3602 	switch (sc->sc_type) {
   3603 	case WM_T_82547:
   3604 	case WM_T_82547_2:
   3605 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3606 		    PBA_22K : PBA_30K;
   3607 		for (i = 0; i < sc->sc_ntxqueues; i++) {
   3608 			struct wm_txqueue *txq = &sc->sc_txq[i];
   3609 			txq->txq_fifo_head = 0;
   3610 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3611 			txq->txq_fifo_size =
   3612 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3613 			txq->txq_fifo_stall = 0;
   3614 		}
   3615 		break;
   3616 	case WM_T_82571:
   3617 	case WM_T_82572:
3618 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3619 	case WM_T_80003:
   3620 		sc->sc_pba = PBA_32K;
   3621 		break;
   3622 	case WM_T_82573:
   3623 		sc->sc_pba = PBA_12K;
   3624 		break;
   3625 	case WM_T_82574:
   3626 	case WM_T_82583:
   3627 		sc->sc_pba = PBA_20K;
   3628 		break;
   3629 	case WM_T_82576:
   3630 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3631 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3632 		break;
   3633 	case WM_T_82580:
   3634 	case WM_T_I350:
   3635 	case WM_T_I354:
   3636 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3637 		break;
   3638 	case WM_T_I210:
   3639 	case WM_T_I211:
   3640 		sc->sc_pba = PBA_34K;
   3641 		break;
   3642 	case WM_T_ICH8:
   3643 		/* Workaround for a bit corruption issue in FIFO memory */
   3644 		sc->sc_pba = PBA_8K;
   3645 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3646 		break;
   3647 	case WM_T_ICH9:
   3648 	case WM_T_ICH10:
   3649 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3650 		    PBA_14K : PBA_10K;
   3651 		break;
   3652 	case WM_T_PCH:
   3653 	case WM_T_PCH2:
   3654 	case WM_T_PCH_LPT:
   3655 		sc->sc_pba = PBA_26K;
   3656 		break;
   3657 	default:
   3658 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3659 		    PBA_40K : PBA_48K;
   3660 		break;
   3661 	}
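	/*
	 * Worked example (assuming the PBA_* constants count kilobytes):
	 * an 82547 at standard MTU gets sc_pba = PBA_30K, so the Tx FIFO
	 * set up above is (PBA_40K - PBA_30K) = 10KB, placed right after
	 * the 30KB receive allocation.
	 */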
   3662 	/*
3663 	 * Only old or non-multiqueue devices have the PBA register.
   3664 	 * XXX Need special handling for 82575.
   3665 	 */
   3666 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3667 	    || (sc->sc_type == WM_T_82575))
   3668 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3669 
   3670 	/* Prevent the PCI-E bus from sticking */
   3671 	if (sc->sc_flags & WM_F_PCIE) {
   3672 		int timeout = 800;
   3673 
   3674 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3675 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3676 
   3677 		while (timeout--) {
   3678 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3679 			    == 0)
   3680 				break;
   3681 			delay(100);
   3682 		}
   3683 	}
   3684 
   3685 	/* Set the completion timeout for interface */
   3686 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3687 	    || (sc->sc_type == WM_T_82580)
   3688 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3689 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3690 		wm_set_pcie_completion_timeout(sc);
   3691 
   3692 	/* Clear interrupt */
   3693 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3694 	if (sc->sc_nintrs > 1) {
   3695 		if (sc->sc_type != WM_T_82574) {
   3696 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3697 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3698 		} else {
   3699 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3700 		}
   3701 	}
   3702 
   3703 	/* Stop the transmit and receive processes. */
   3704 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3705 	sc->sc_rctl &= ~RCTL_EN;
   3706 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3707 	CSR_WRITE_FLUSH(sc);
   3708 
   3709 	/* XXX set_tbi_sbp_82543() */
   3710 
   3711 	delay(10*1000);
   3712 
   3713 	/* Must acquire the MDIO ownership before MAC reset */
   3714 	switch (sc->sc_type) {
   3715 	case WM_T_82573:
   3716 	case WM_T_82574:
   3717 	case WM_T_82583:
   3718 		error = wm_get_hw_semaphore_82573(sc);
   3719 		break;
   3720 	default:
   3721 		break;
   3722 	}
   3723 
   3724 	/*
   3725 	 * 82541 Errata 29? & 82547 Errata 28?
   3726 	 * See also the description about PHY_RST bit in CTRL register
   3727 	 * in 8254x_GBe_SDM.pdf.
   3728 	 */
   3729 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3730 		CSR_WRITE(sc, WMREG_CTRL,
   3731 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3732 		CSR_WRITE_FLUSH(sc);
   3733 		delay(5000);
   3734 	}
   3735 
   3736 	switch (sc->sc_type) {
   3737 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3738 	case WM_T_82541:
   3739 	case WM_T_82541_2:
   3740 	case WM_T_82547:
   3741 	case WM_T_82547_2:
   3742 		/*
   3743 		 * On some chipsets, a reset through a memory-mapped write
   3744 		 * cycle can cause the chip to reset before completing the
   3745 		 * write cycle.  This causes major headache that can be
3746 		 * write cycle.  This causes a major headache that can be
   3747 		 * through I/O space.
   3748 		 *
   3749 		 * So, if we successfully mapped the I/O BAR at attach time,
   3750 		 * use that.  Otherwise, try our luck with a memory-mapped
   3751 		 * reset.
   3752 		 */
   3753 		if (sc->sc_flags & WM_F_IOH_VALID)
   3754 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3755 		else
   3756 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3757 		break;
   3758 	case WM_T_82545_3:
   3759 	case WM_T_82546_3:
   3760 		/* Use the shadow control register on these chips. */
   3761 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3762 		break;
   3763 	case WM_T_80003:
   3764 		mask = swfwphysem[sc->sc_funcid];
   3765 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3766 		wm_get_swfw_semaphore(sc, mask);
   3767 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3768 		wm_put_swfw_semaphore(sc, mask);
   3769 		break;
   3770 	case WM_T_ICH8:
   3771 	case WM_T_ICH9:
   3772 	case WM_T_ICH10:
   3773 	case WM_T_PCH:
   3774 	case WM_T_PCH2:
   3775 	case WM_T_PCH_LPT:
   3776 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3777 		if (wm_check_reset_block(sc) == 0) {
   3778 			/*
   3779 			 * Gate automatic PHY configuration by hardware on
   3780 			 * non-managed 82579
   3781 			 */
   3782 			if ((sc->sc_type == WM_T_PCH2)
   3783 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3784 				!= 0))
   3785 				wm_gate_hw_phy_config_ich8lan(sc, 1);
   3786 
   3787 
   3788 			reg |= CTRL_PHY_RESET;
   3789 			phy_reset = 1;
   3790 		}
   3791 		wm_get_swfwhw_semaphore(sc);
   3792 		CSR_WRITE(sc, WMREG_CTRL, reg);
3793 		/* Don't insert a completion barrier during reset */
   3794 		delay(20*1000);
   3795 		wm_put_swfwhw_semaphore(sc);
   3796 		break;
   3797 	case WM_T_82580:
   3798 	case WM_T_I350:
   3799 	case WM_T_I354:
   3800 	case WM_T_I210:
   3801 	case WM_T_I211:
   3802 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3803 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3804 			CSR_WRITE_FLUSH(sc);
   3805 		delay(5000);
   3806 		break;
   3807 	case WM_T_82542_2_0:
   3808 	case WM_T_82542_2_1:
   3809 	case WM_T_82543:
   3810 	case WM_T_82540:
   3811 	case WM_T_82545:
   3812 	case WM_T_82546:
   3813 	case WM_T_82571:
   3814 	case WM_T_82572:
   3815 	case WM_T_82573:
   3816 	case WM_T_82574:
   3817 	case WM_T_82575:
   3818 	case WM_T_82576:
   3819 	case WM_T_82583:
   3820 	default:
   3821 		/* Everything else can safely use the documented method. */
   3822 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3823 		break;
   3824 	}
   3825 
   3826 	/* Must release the MDIO ownership after MAC reset */
   3827 	switch (sc->sc_type) {
   3828 	case WM_T_82573:
   3829 	case WM_T_82574:
   3830 	case WM_T_82583:
   3831 		if (error == 0)
   3832 			wm_put_hw_semaphore_82573(sc);
   3833 		break;
   3834 	default:
   3835 		break;
   3836 	}
   3837 
   3838 	if (phy_reset != 0)
   3839 		wm_get_cfg_done(sc);
   3840 
   3841 	/* reload EEPROM */
   3842 	switch (sc->sc_type) {
   3843 	case WM_T_82542_2_0:
   3844 	case WM_T_82542_2_1:
   3845 	case WM_T_82543:
   3846 	case WM_T_82544:
   3847 		delay(10);
   3848 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3849 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3850 		CSR_WRITE_FLUSH(sc);
   3851 		delay(2000);
   3852 		break;
   3853 	case WM_T_82540:
   3854 	case WM_T_82545:
   3855 	case WM_T_82545_3:
   3856 	case WM_T_82546:
   3857 	case WM_T_82546_3:
   3858 		delay(5*1000);
   3859 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3860 		break;
   3861 	case WM_T_82541:
   3862 	case WM_T_82541_2:
   3863 	case WM_T_82547:
   3864 	case WM_T_82547_2:
   3865 		delay(20000);
   3866 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3867 		break;
   3868 	case WM_T_82571:
   3869 	case WM_T_82572:
   3870 	case WM_T_82573:
   3871 	case WM_T_82574:
   3872 	case WM_T_82583:
   3873 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3874 			delay(10);
   3875 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3876 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3877 			CSR_WRITE_FLUSH(sc);
   3878 		}
   3879 		/* check EECD_EE_AUTORD */
   3880 		wm_get_auto_rd_done(sc);
   3881 		/*
3882 		 * PHY configuration from the NVM starts right after
3883 		 * EECD_AUTO_RD is set, so give it time to complete.
   3884 		 */
   3885 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3886 		    || (sc->sc_type == WM_T_82583))
   3887 			delay(25*1000);
   3888 		break;
   3889 	case WM_T_82575:
   3890 	case WM_T_82576:
   3891 	case WM_T_82580:
   3892 	case WM_T_I350:
   3893 	case WM_T_I354:
   3894 	case WM_T_I210:
   3895 	case WM_T_I211:
   3896 	case WM_T_80003:
   3897 		/* check EECD_EE_AUTORD */
   3898 		wm_get_auto_rd_done(sc);
   3899 		break;
   3900 	case WM_T_ICH8:
   3901 	case WM_T_ICH9:
   3902 	case WM_T_ICH10:
   3903 	case WM_T_PCH:
   3904 	case WM_T_PCH2:
   3905 	case WM_T_PCH_LPT:
   3906 		break;
   3907 	default:
   3908 		panic("%s: unknown type\n", __func__);
   3909 	}
   3910 
3911 	/* Check whether the EEPROM is present */
   3912 	switch (sc->sc_type) {
   3913 	case WM_T_82575:
   3914 	case WM_T_82576:
   3915 	case WM_T_82580:
   3916 	case WM_T_I350:
   3917 	case WM_T_I354:
   3918 	case WM_T_ICH8:
   3919 	case WM_T_ICH9:
   3920 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3921 			/* Not found */
   3922 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3923 			if (sc->sc_type == WM_T_82575)
   3924 				wm_reset_init_script_82575(sc);
   3925 		}
   3926 		break;
   3927 	default:
   3928 		break;
   3929 	}
   3930 
   3931 	if ((sc->sc_type == WM_T_82580)
   3932 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3933 		/* clear global device reset status bit */
   3934 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3935 	}
   3936 
   3937 	/* Clear any pending interrupt events. */
   3938 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3939 	reg = CSR_READ(sc, WMREG_ICR);
   3940 	if (sc->sc_nintrs > 1) {
   3941 		if (sc->sc_type != WM_T_82574) {
   3942 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3943 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3944 		} else
   3945 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3946 	}
   3947 
   3948 	/* reload sc_ctrl */
   3949 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3950 
   3951 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   3952 		wm_set_eee_i350(sc);
   3953 
   3954 	/* dummy read from WUC */
   3955 	if (sc->sc_type == WM_T_PCH)
   3956 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3957 	/*
   3958 	 * For PCH, this write will make sure that any noise will be detected
   3959 	 * as a CRC error and be dropped rather than show up as a bad packet
   3960 	 * to the DMA engine
   3961 	 */
   3962 	if (sc->sc_type == WM_T_PCH)
   3963 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3964 
   3965 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3966 		CSR_WRITE(sc, WMREG_WUC, 0);
   3967 
   3968 	wm_reset_mdicnfg_82580(sc);
   3969 
   3970 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   3971 		wm_pll_workaround_i210(sc);
   3972 }
   3973 
   3974 /*
   3975  * wm_add_rxbuf:
   3976  *
3977  *	Add a receive buffer to the indicated descriptor.
   3978  */
   3979 static int
   3980 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   3981 {
   3982 	struct wm_softc *sc = rxq->rxq_sc;
   3983 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   3984 	struct mbuf *m;
   3985 	int error;
   3986 
   3987 	KASSERT(WM_RX_LOCKED(rxq));
   3988 
   3989 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3990 	if (m == NULL)
   3991 		return ENOBUFS;
   3992 
   3993 	MCLGET(m, M_DONTWAIT);
   3994 	if ((m->m_flags & M_EXT) == 0) {
   3995 		m_freem(m);
   3996 		return ENOBUFS;
   3997 	}
   3998 
   3999 	if (rxs->rxs_mbuf != NULL)
   4000 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4001 
   4002 	rxs->rxs_mbuf = m;
   4003 
   4004 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4005 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4006 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   4007 	if (error) {
   4008 		/* XXX XXX XXX */
   4009 		aprint_error_dev(sc->sc_dev,
   4010 		    "unable to load rx DMA map %d, error = %d\n",
   4011 		    idx, error);
   4012 		panic("wm_add_rxbuf");
   4013 	}
   4014 
   4015 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4016 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4017 
   4018 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4019 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4020 			wm_init_rxdesc(rxq, idx);
   4021 	} else
   4022 		wm_init_rxdesc(rxq, idx);
   4023 
   4024 	return 0;
   4025 }
   4026 
   4027 /*
   4028  * wm_rxdrain:
   4029  *
   4030  *	Drain the receive queue.
   4031  */
   4032 static void
   4033 wm_rxdrain(struct wm_rxqueue *rxq)
   4034 {
   4035 	struct wm_softc *sc = rxq->rxq_sc;
   4036 	struct wm_rxsoft *rxs;
   4037 	int i;
   4038 
   4039 	KASSERT(WM_RX_LOCKED(rxq));
   4040 
   4041 	for (i = 0; i < WM_NRXDESC; i++) {
   4042 		rxs = &rxq->rxq_soft[i];
   4043 		if (rxs->rxs_mbuf != NULL) {
   4044 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4045 			m_freem(rxs->rxs_mbuf);
   4046 			rxs->rxs_mbuf = NULL;
   4047 		}
   4048 	}
   4049 }
   4050 
   4051 
   4052 /*
   4053  * XXX copy from FreeBSD's sys/net/rss_config.c
   4054  */
   4055 /*
   4056  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4057  * effectiveness may be limited by algorithm choice and available entropy
   4058  * during the boot.
   4059  *
   4060  * XXXRW: And that we don't randomize it yet!
   4061  *
   4062  * This is the default Microsoft RSS specification key which is also
   4063  * the Chelsio T5 firmware default key.
   4064  */
   4065 #define RSS_KEYSIZE 40
   4066 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4067 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4068 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4069 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4070 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4071 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4072 };
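/*
 * For reference, a minimal sketch (not used by this driver; the hardware
 * computes the hash itself) of the Microsoft Toeplitz hash that consumes
 * this key: for every set bit of the input tuple, XOR in the 32-bit
 * window of the key starting at that bit position.
 *
 *	static uint32_t
 *	toeplitz_hash(const uint8_t *key, const uint8_t *data, size_t len)
 *	{
 *		uint32_t hash = 0, v;
 *		size_t i;
 *		int b;
 *
 *		// Initial window: the first 32 bits of the key.
 *		v = (key[0] << 24) | (key[1] << 16) | (key[2] << 8) | key[3];
 *		for (i = 0; i < len; i++) {
 *			for (b = 0; b < 8; b++) {
 *				if (data[i] & (0x80 >> b))
 *					hash ^= v;
 *				// Slide the window left by one key bit.
 *				v <<= 1;
 *				if (key[i + 4] & (0x80 >> b))
 *					v |= 1;
 *			}
 *		}
 *		return hash;
 *	}
 *
 * With the 40-byte key above this handles tuples of up to 36 bytes,
 * enough for an IPv6 address pair plus TCP ports.
 */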
   4073 
   4074 /*
   4075  * Caller must pass an array of size sizeof(rss_key).
   4076  *
   4077  * XXX
   4078  * As if_ixgbe may use this function, this function should not be
   4079  * if_wm specific function.
   4080  */
   4081 static void
   4082 wm_rss_getkey(uint8_t *key)
   4083 {
   4084 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4085 }
   4086 
   4087 /*
   4088  * Setup registers for RSS.
   4089  *
   4090  * XXX not yet VMDq support
   4091  */
   4092 static void
   4093 wm_init_rss(struct wm_softc *sc)
   4094 {
   4095 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4096 	int i;
   4097 
   4098 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4099 		int qid, reta_ent;
   4100 
   4101 		qid  = i % sc->sc_nrxqueues;
4102 		switch (sc->sc_type) {
   4103 		case WM_T_82574:
   4104 			reta_ent = __SHIFTIN(qid,
   4105 			    RETA_ENT_QINDEX_MASK_82574);
   4106 			break;
   4107 		case WM_T_82575:
   4108 			reta_ent = __SHIFTIN(qid,
   4109 			    RETA_ENT_QINDEX1_MASK_82575);
   4110 			break;
   4111 		default:
   4112 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4113 			break;
   4114 		}
   4115 
   4116 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4117 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4118 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4119 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4120 	}
   4121 
   4122 	wm_rss_getkey((uint8_t *)rss_key);
   4123 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4124 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4125 
   4126 	if (sc->sc_type == WM_T_82574)
   4127 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4128 	else
   4129 		mrqc = MRQC_ENABLE_RSS_MQ;
   4130 
4131 	/* XXX
4132 	 * The same as FreeBSD's igb.
4133 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4134 	 */
   4135 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4136 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4137 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4138 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4139 
   4140 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4141 }
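/*
 * Illustrative example (not driver code): with sc_nrxqueues == 2 the
 * loop above fills the redirection table entries with queue ids
 * 0, 1, 0, 1, ...; the hardware then uses low-order bits of the RSS
 * hash to index this table, spreading flows across both RX queues.
 */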
   4142 
   4143 #ifdef WM_MSI_MSIX
   4144 
   4145 /*
4146  * Adjust the TX and RX queue numbers which the system actually uses.
4147  *
4148  * The numbers are affected by the parameters below:
4149  *     - The number of hardware queues
   4150  *     - The number of MSI-X vectors (= "nvectors" argument)
   4151  *     - ncpu
   4152  */
   4153 static void
   4154 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4155 {
   4156 	int hw_ntxqueues, hw_nrxqueues;
   4157 
   4158 	if (nvectors < 3) {
   4159 		sc->sc_ntxqueues = 1;
   4160 		sc->sc_nrxqueues = 1;
   4161 		return;
   4162 	}
   4163 
4164 	switch (sc->sc_type) {
   4165 	case WM_T_82572:
   4166 		hw_ntxqueues = 2;
   4167 		hw_nrxqueues = 2;
   4168 		break;
   4169 	case WM_T_82574:
   4170 		hw_ntxqueues = 2;
   4171 		hw_nrxqueues = 2;
   4172 		break;
   4173 	case WM_T_82575:
   4174 		hw_ntxqueues = 4;
   4175 		hw_nrxqueues = 4;
   4176 		break;
   4177 	case WM_T_82576:
   4178 		hw_ntxqueues = 16;
   4179 		hw_nrxqueues = 16;
   4180 		break;
   4181 	case WM_T_82580:
   4182 	case WM_T_I350:
   4183 	case WM_T_I354:
   4184 		hw_ntxqueues = 8;
   4185 		hw_nrxqueues = 8;
   4186 		break;
   4187 	case WM_T_I210:
   4188 		hw_ntxqueues = 4;
   4189 		hw_nrxqueues = 4;
   4190 		break;
   4191 	case WM_T_I211:
   4192 		hw_ntxqueues = 2;
   4193 		hw_nrxqueues = 2;
   4194 		break;
   4195 		/*
4196 		 * As the ethernet controllers below do not support MSI-X,
4197 		 * this driver does not use multiqueue on them:
   4198 		 *     - WM_T_80003
   4199 		 *     - WM_T_ICH8
   4200 		 *     - WM_T_ICH9
   4201 		 *     - WM_T_ICH10
   4202 		 *     - WM_T_PCH
   4203 		 *     - WM_T_PCH2
   4204 		 *     - WM_T_PCH_LPT
   4205 		 */
   4206 	default:
   4207 		hw_ntxqueues = 1;
   4208 		hw_nrxqueues = 1;
   4209 		break;
   4210 	}
   4211 
   4212 	/*
4213 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
4214 	 * the number of queues actually used.
4215 	 *
4216 	 * XXX
4217 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
4218 	 * However, the number of MSI-X vectors on recent controllers (such as
4219 	 * the I354) expects drivers to bundle a TX queue interrupt and an RX
4220 	 * queue interrupt into one interrupt, e.g. FreeBSD's igb handles
4221 	 * interrupts that way.
   4222 	 */
   4223 	if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
   4224 		sc->sc_ntxqueues = (nvectors - 1) / 2;
   4225 		sc->sc_nrxqueues = (nvectors - 1) / 2;
   4226 	} else {
   4227 		sc->sc_ntxqueues = hw_ntxqueues;
   4228 		sc->sc_nrxqueues = hw_nrxqueues;
   4229 	}
   4230 
   4231 	/*
4232 	 * As more queues than CPUs cannot improve scaling, we limit
4233 	 * the number of queues actually used.
   4234 	 */
   4235 	if (ncpu < sc->sc_ntxqueues)
   4236 		sc->sc_ntxqueues = ncpu;
   4237 	if (ncpu < sc->sc_nrxqueues)
   4238 		sc->sc_nrxqueues = ncpu;
   4239 
   4240 	/* XXX Currently, this driver supports RX multiqueue only. */
   4241 	sc->sc_ntxqueues = 1;
   4242 }
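/*
 * Worked example (traced from the code above): an 82576 (16 hardware
 * TX/RX queues) given nvectors == 10 on an 8-CPU machine: since
 * 10 < 16 + 16 + 1, both queue counts become (10 - 1) / 2 = 4; the ncpu
 * clamp leaves them at 4; finally sc_ntxqueues is forced back to 1 by
 * the RX-only multiqueue limitation noted above.
 */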
   4243 
   4244 /*
4245  * Both single-interrupt MSI and INTx can use this function.
   4246  */
   4247 static int
   4248 wm_setup_legacy(struct wm_softc *sc)
   4249 {
   4250 	pci_chipset_tag_t pc = sc->sc_pc;
   4251 	const char *intrstr = NULL;
   4252 	char intrbuf[PCI_INTRSTR_LEN];
   4253 
   4254 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4255 	    sizeof(intrbuf));
   4256 #ifdef WM_MPSAFE
   4257 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4258 #endif
   4259 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4260 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4261 	if (sc->sc_ihs[0] == NULL) {
4262 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4263 		    (pci_intr_type(sc->sc_intrs[0])
   4264 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4265 		return ENOMEM;
   4266 	}
   4267 
   4268 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4269 	sc->sc_nintrs = 1;
   4270 	return 0;
   4271 }
   4272 
   4273 static int
   4274 wm_setup_msix(struct wm_softc *sc)
   4275 {
   4276 	void *vih;
   4277 	kcpuset_t *affinity;
   4278 	int qidx, error, intr_idx, tx_established, rx_established;
   4279 	pci_chipset_tag_t pc = sc->sc_pc;
   4280 	const char *intrstr = NULL;
   4281 	char intrbuf[PCI_INTRSTR_LEN];
   4282 	char intr_xname[INTRDEVNAMEBUF];
   4283 
   4284 	kcpuset_create(&affinity, false);
   4285 	intr_idx = 0;
   4286 
   4287 	/*
   4288 	 * TX
   4289 	 */
   4290 	tx_established = 0;
   4291 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4292 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4293 
   4294 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4295 		    sizeof(intrbuf));
   4296 #ifdef WM_MPSAFE
   4297 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4298 		    PCI_INTR_MPSAFE, true);
   4299 #endif
   4300 		memset(intr_xname, 0, sizeof(intr_xname));
   4301 		snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
   4302 		    device_xname(sc->sc_dev), qidx);
   4303 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4304 		    IPL_NET, wm_txintr_msix, txq, intr_xname);
   4305 		if (vih == NULL) {
   4306 			aprint_error_dev(sc->sc_dev,
   4307 			    "unable to establish MSI-X(for TX)%s%s\n",
   4308 			    intrstr ? " at " : "",
   4309 			    intrstr ? intrstr : "");
   4310 
   4311 			goto fail_0;
   4312 		}
   4313 		kcpuset_zero(affinity);
   4314 		/* Round-robin affinity */
   4315 		kcpuset_set(affinity, intr_idx % ncpu);
   4316 		error = interrupt_distribute(vih, affinity, NULL);
   4317 		if (error == 0) {
   4318 			aprint_normal_dev(sc->sc_dev,
   4319 			    "for TX interrupting at %s affinity to %u\n",
   4320 			    intrstr, intr_idx % ncpu);
   4321 		} else {
   4322 			aprint_normal_dev(sc->sc_dev,
   4323 			    "for TX interrupting at %s\n", intrstr);
   4324 		}
   4325 		sc->sc_ihs[intr_idx] = vih;
   4326 		txq->txq_id = qidx;
   4327 		txq->txq_intr_idx = intr_idx;
   4328 
   4329 		tx_established++;
   4330 		intr_idx++;
   4331 	}
   4332 
   4333 	/*
   4334 	 * RX
   4335 	 */
   4336 	rx_established = 0;
   4337 	for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4338 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4339 
   4340 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4341 		    sizeof(intrbuf));
   4342 #ifdef WM_MPSAFE
   4343 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4344 		    PCI_INTR_MPSAFE, true);
   4345 #endif
   4346 		memset(intr_xname, 0, sizeof(intr_xname));
   4347 		snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
   4348 		    device_xname(sc->sc_dev), qidx);
   4349 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4350 		    IPL_NET, wm_rxintr_msix, rxq, intr_xname);
   4351 		if (vih == NULL) {
   4352 			aprint_error_dev(sc->sc_dev,
   4353 			    "unable to establish MSI-X(for RX)%s%s\n",
   4354 			    intrstr ? " at " : "",
   4355 			    intrstr ? intrstr : "");
   4356 
   4357 			goto fail_1;
   4358 		}
   4359 		kcpuset_zero(affinity);
   4360 		/* Round-robin affinity */
   4361 		kcpuset_set(affinity, intr_idx % ncpu);
   4362 		error = interrupt_distribute(vih, affinity, NULL);
   4363 		if (error == 0) {
   4364 			aprint_normal_dev(sc->sc_dev,
   4365 			    "for RX interrupting at %s affinity to %u\n",
   4366 			    intrstr, intr_idx % ncpu);
   4367 		} else {
   4368 			aprint_normal_dev(sc->sc_dev,
   4369 			    "for RX interrupting at %s\n", intrstr);
   4370 		}
   4371 		sc->sc_ihs[intr_idx] = vih;
   4372 		rxq->rxq_id = qidx;
   4373 		rxq->rxq_intr_idx = intr_idx;
   4374 
   4375 		rx_established++;
   4376 		intr_idx++;
   4377 	}
   4378 
   4379 	/*
   4380 	 * LINK
   4381 	 */
   4382 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4383 	    sizeof(intrbuf));
   4384 #ifdef WM_MPSAFE
   4385 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4386 	    PCI_INTR_MPSAFE, true);
   4387 #endif
   4388 	memset(intr_xname, 0, sizeof(intr_xname));
   4389 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4390 	    device_xname(sc->sc_dev));
   4391 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4392 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4393 	if (vih == NULL) {
   4394 		aprint_error_dev(sc->sc_dev,
   4395 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4396 		    intrstr ? " at " : "",
   4397 		    intrstr ? intrstr : "");
   4398 
   4399 		goto fail_1;
   4400 	}
   4401 	/* keep default affinity to LINK interrupt */
   4402 	aprint_normal_dev(sc->sc_dev,
   4403 	    "for LINK interrupting at %s\n", intrstr);
   4404 	sc->sc_ihs[intr_idx] = vih;
   4405 	sc->sc_link_intr_idx = intr_idx;
   4406 
   4407 	sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   4408 	kcpuset_destroy(affinity);
   4409 	return 0;
   4410 
   4411  fail_1:
   4412 	for (qidx = 0; qidx < rx_established; qidx++) {
   4413 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4414 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[rxq->rxq_intr_idx]);
   4415 		sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
   4416 	}
   4417  fail_0:
   4418 	for (qidx = 0; qidx < tx_established; qidx++) {
   4419 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4420 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[txq->txq_intr_idx]);
   4421 		sc->sc_ihs[txq->txq_intr_idx] = NULL;
   4422 	}
   4423 
   4424 	kcpuset_destroy(affinity);
   4425 	return ENOMEM;
   4426 }
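/*
 * Illustrative example (hypothetical configuration): with one TX queue,
 * two RX queues and ncpu == 4, the loops above bind vector 0 (TX0) to
 * cpu0, vector 1 (RX0) to cpu1 and vector 2 (RX1) to cpu2, while
 * vector 3 (LINK) keeps the system default affinity.
 */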
   4427 #endif
   4428 
   4429 /*
   4430  * wm_init:		[ifnet interface function]
   4431  *
   4432  *	Initialize the interface.
   4433  */
   4434 static int
   4435 wm_init(struct ifnet *ifp)
   4436 {
   4437 	struct wm_softc *sc = ifp->if_softc;
   4438 	int ret;
   4439 
   4440 	WM_CORE_LOCK(sc);
   4441 	ret = wm_init_locked(ifp);
   4442 	WM_CORE_UNLOCK(sc);
   4443 
   4444 	return ret;
   4445 }
   4446 
   4447 static int
   4448 wm_init_locked(struct ifnet *ifp)
   4449 {
   4450 	struct wm_softc *sc = ifp->if_softc;
   4451 	int i, j, trynum, error = 0;
   4452 	uint32_t reg;
   4453 
   4454 	KASSERT(WM_CORE_LOCKED(sc));
   4455 	/*
4456 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4457 	 * There is a small but measurable benefit to avoiding the adjustment
   4458 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4459 	 * on such platforms.  One possibility is that the DMA itself is
   4460 	 * slightly more efficient if the front of the entire packet (instead
   4461 	 * of the front of the headers) is aligned.
   4462 	 *
   4463 	 * Note we must always set align_tweak to 0 if we are using
   4464 	 * jumbo frames.
   4465 	 */
   4466 #ifdef __NO_STRICT_ALIGNMENT
   4467 	sc->sc_align_tweak = 0;
   4468 #else
   4469 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4470 		sc->sc_align_tweak = 0;
   4471 	else
   4472 		sc->sc_align_tweak = 2;
   4473 #endif /* __NO_STRICT_ALIGNMENT */
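	/*
	 * Worked example: the Ethernet header is ETHER_HDR_LEN (14) bytes
	 * long, so with the 2-byte tweak the IP header begins at offset
	 * 14 + 2 = 16 in the receive buffer, which is 4-byte aligned on
	 * strict-alignment platforms.
	 */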
   4474 
   4475 	/* Cancel any pending I/O. */
   4476 	wm_stop_locked(ifp, 0);
   4477 
   4478 	/* update statistics before reset */
   4479 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4480 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4481 
   4482 	/* Reset the chip to a known state. */
   4483 	wm_reset(sc);
   4484 
   4485 	switch (sc->sc_type) {
   4486 	case WM_T_82571:
   4487 	case WM_T_82572:
   4488 	case WM_T_82573:
   4489 	case WM_T_82574:
   4490 	case WM_T_82583:
   4491 	case WM_T_80003:
   4492 	case WM_T_ICH8:
   4493 	case WM_T_ICH9:
   4494 	case WM_T_ICH10:
   4495 	case WM_T_PCH:
   4496 	case WM_T_PCH2:
   4497 	case WM_T_PCH_LPT:
   4498 		if (wm_check_mng_mode(sc) != 0)
   4499 			wm_get_hw_control(sc);
   4500 		break;
   4501 	default:
   4502 		break;
   4503 	}
   4504 
   4505 	/* Init hardware bits */
   4506 	wm_initialize_hardware_bits(sc);
   4507 
   4508 	/* Reset the PHY. */
   4509 	if (sc->sc_flags & WM_F_HAS_MII)
   4510 		wm_gmii_reset(sc);
   4511 
   4512 	/* Calculate (E)ITR value */
   4513 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4514 		sc->sc_itr = 450;	/* For EITR */
   4515 	} else if (sc->sc_type >= WM_T_82543) {
   4516 		/*
   4517 		 * Set up the interrupt throttling register (units of 256ns)
   4518 		 * Note that a footnote in Intel's documentation says this
   4519 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4520 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4521 		 * that that is also true for the 1024ns units of the other
   4522 		 * interrupt-related timer registers -- so, really, we ought
   4523 		 * to divide this value by 4 when the link speed is low.
   4524 		 *
   4525 		 * XXX implement this division at link speed change!
   4526 		 */
   4527 
   4528 		/*
   4529 		 * For N interrupts/sec, set this value to:
   4530 		 * 1000000000 / (N * 256).  Note that we set the
   4531 		 * absolute and packet timer values to this value
   4532 		 * divided by 4 to get "simple timer" behavior.
   4533 		 */
   4534 
   4535 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4536 	}
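	/*
	 * Sanity check of the arithmetic above: with sc_itr = 1500,
	 * N = 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, which
	 * matches the "2604 ints/sec" note.
	 */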
   4537 
   4538 	error = wm_init_txrx_queues(sc);
   4539 	if (error)
   4540 		goto out;
   4541 
   4542 	/*
   4543 	 * Clear out the VLAN table -- we don't use it (yet).
   4544 	 */
   4545 	CSR_WRITE(sc, WMREG_VET, 0);
   4546 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4547 		trynum = 10; /* Due to hw errata */
   4548 	else
   4549 		trynum = 1;
   4550 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4551 		for (j = 0; j < trynum; j++)
   4552 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4553 
   4554 	/*
   4555 	 * Set up flow-control parameters.
   4556 	 *
   4557 	 * XXX Values could probably stand some tuning.
   4558 	 */
   4559 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4560 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4561 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   4562 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4563 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4564 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4565 	}
   4566 
   4567 	sc->sc_fcrtl = FCRTL_DFLT;
   4568 	if (sc->sc_type < WM_T_82543) {
   4569 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4570 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4571 	} else {
   4572 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4573 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4574 	}
   4575 
   4576 	if (sc->sc_type == WM_T_80003)
   4577 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4578 	else
   4579 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4580 
   4581 	/* Writes the control register. */
   4582 	wm_set_vlan(sc);
   4583 
   4584 	if (sc->sc_flags & WM_F_HAS_MII) {
   4585 		int val;
   4586 
   4587 		switch (sc->sc_type) {
   4588 		case WM_T_80003:
   4589 		case WM_T_ICH8:
   4590 		case WM_T_ICH9:
   4591 		case WM_T_ICH10:
   4592 		case WM_T_PCH:
   4593 		case WM_T_PCH2:
   4594 		case WM_T_PCH_LPT:
   4595 			/*
   4596 			 * Set the mac to wait the maximum time between each
   4597 			 * iteration and increase the max iterations when
   4598 			 * polling the phy; this fixes erroneous timeouts at
   4599 			 * 10Mbps.
   4600 			 */
   4601 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4602 			    0xFFFF);
   4603 			val = wm_kmrn_readreg(sc,
   4604 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   4605 			val |= 0x3F;
   4606 			wm_kmrn_writereg(sc,
   4607 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4608 			break;
   4609 		default:
   4610 			break;
   4611 		}
   4612 
   4613 		if (sc->sc_type == WM_T_80003) {
   4614 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4615 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4616 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4617 
   4618 			/* Bypass RX and TX FIFO's */
   4619 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4620 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4621 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4622 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4623 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4624 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4625 		}
   4626 	}
   4627 #if 0
   4628 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4629 #endif
   4630 
   4631 	/* Set up checksum offload parameters. */
   4632 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4633 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4634 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4635 		reg |= RXCSUM_IPOFL;
   4636 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4637 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4638 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4639 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4640 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4641 
   4642 	/* Set up MSI-X */
   4643 	if (sc->sc_nintrs > 1) {
   4644 		uint32_t ivar;
   4645 
   4646 		if (sc->sc_type == WM_T_82575) {
   4647 			/* Interrupt control */
   4648 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4649 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4650 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4651 
   4652 			/* TX */
   4653 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4654 				struct wm_txqueue *txq = &sc->sc_txq[i];
   4655 				CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
   4656 				    EITR_TX_QUEUE(txq->txq_id));
   4657 			}
   4658 			/* RX */
   4659 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4660 				struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4661 				CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
   4662 				    EITR_RX_QUEUE(rxq->rxq_id));
   4663 			}
   4664 			/* Link status */
   4665 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4666 			    EITR_OTHER);
   4667 		} else if (sc->sc_type == WM_T_82574) {
   4668 			/* Interrupt control */
   4669 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4670 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4671 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4672 
   4673 			ivar = 0;
   4674 			/* TX */
   4675 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4676 				struct wm_txqueue *txq = &sc->sc_txq[i];
   4677 				ivar |= __SHIFTIN((IVAR_VALID_82574|txq->txq_intr_idx),
   4678 				    IVAR_TX_MASK_Q_82574(txq->txq_id));
   4679 			}
   4680 			/* RX */
   4681 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4682 				struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4683 				ivar |= __SHIFTIN((IVAR_VALID_82574|rxq->rxq_intr_idx),
   4684 				    IVAR_RX_MASK_Q_82574(rxq->rxq_id));
   4685 			}
   4686 			/* Link status */
   4687 			ivar |= __SHIFTIN((IVAR_VALID_82574|sc->sc_link_intr_idx),
   4688 			    IVAR_OTHER_MASK);
   4689 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4690 		} else {
   4691 			/* Interrupt control */
   4692 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
   4693 			    | GPIE_MULTI_MSIX | GPIE_EIAME
   4694 			    | GPIE_PBA);
   4695 
   4696 			switch (sc->sc_type) {
   4697 			case WM_T_82580:
   4698 			case WM_T_I350:
   4699 			case WM_T_I354:
   4700 			case WM_T_I210:
   4701 			case WM_T_I211:
   4702 				/* TX */
   4703 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4704 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4705 					int qid = txq->txq_id;
   4706 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4707 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4708 					ivar |= __SHIFTIN(
   4709 						(txq->txq_intr_idx | IVAR_VALID),
   4710 						IVAR_TX_MASK_Q(qid));
   4711 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4712 				}
   4713 
   4714 				/* RX */
   4715 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4716 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4717 					int qid = rxq->rxq_id;
   4718 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4719 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4720 					ivar |= __SHIFTIN(
   4721 						(rxq->rxq_intr_idx | IVAR_VALID),
   4722 						IVAR_RX_MASK_Q(qid));
   4723 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4724 				}
   4725 				break;
   4726 			case WM_T_82576:
   4727 				/* TX */
   4728 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4729 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4730 					int qid = txq->txq_id;
   4731 					ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
   4732 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4733 					ivar |= __SHIFTIN(
   4734 						(txq->txq_intr_idx | IVAR_VALID),
   4735 						IVAR_TX_MASK_Q_82576(qid));
   4736 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
   4737 				}
   4738 
   4739 				/* RX */
   4740 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4741 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4742 					int qid = rxq->rxq_id;
   4743 					ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
   4744 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4745 					ivar |= __SHIFTIN(
   4746 						(rxq->rxq_intr_idx | IVAR_VALID),
   4747 						IVAR_RX_MASK_Q_82576(qid));
   4748 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
   4749 				}
   4750 				break;
   4751 			default:
   4752 				break;
   4753 			}
   4754 
   4755 			/* Link status */
   4756 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4757 			    IVAR_MISC_OTHER);
   4758 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4759 		}
   4760 
   4761 		if (sc->sc_nrxqueues > 1) {
   4762 			wm_init_rss(sc);
   4763 
    4764 			/*
    4765 			 * NOTE: Receive full-packet checksum offload is
    4766 			 * mutually exclusive with multiqueue.  This does not
    4767 			 * affect the TCP/IP checksum offloads, which still
    4768 			 * work.
    4769 			 */
   4770 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4771 			reg |= RXCSUM_PCSD;
   4772 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4773 		}
   4774 	}
   4775 
   4776 	/* Set up the interrupt registers. */
   4777 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
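         	/*
         	 * Causes enabled below (standard e1000 ICR bit names):
         	 * TX descriptor written back (TXDW), link status change
         	 * (LSC), RX sequence error (RXSEQ), RX descriptor minimum
         	 * threshold (RXDMT0), RX overrun (RXO) and RX timer (RXT0).
         	 */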
   4778 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4779 	    ICR_RXO | ICR_RXT0;
   4780 	if (sc->sc_nintrs > 1) {
   4781 		uint32_t mask;
   4782 		switch (sc->sc_type) {
   4783 		case WM_T_82574:
   4784 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4785 			    WMREG_EIAC_82574_MSIX_MASK);
   4786 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4787 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4788 			break;
   4789 		default:
   4790 			if (sc->sc_type == WM_T_82575) {
   4791 				mask = 0;
   4792 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4793 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4794 					mask |= EITR_TX_QUEUE(txq->txq_id);
   4795 				}
   4796 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4797 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4798 					mask |= EITR_RX_QUEUE(rxq->rxq_id);
   4799 				}
   4800 				mask |= EITR_OTHER;
   4801 			} else {
   4802 				mask = 0;
   4803 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4804 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4805 					mask |= 1 << txq->txq_intr_idx;
   4806 				}
   4807 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4808 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4809 					mask |= 1 << rxq->rxq_intr_idx;
   4810 				}
   4811 				mask |= 1 << sc->sc_link_intr_idx;
   4812 			}
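         			/*
         			 * EIAC: auto-clear, EIAM: auto-mask, EIMS:
         			 * enable the selected extended causes.
         			 */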
   4813 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4814 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4815 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4816 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4817 			break;
   4818 		}
   4819 	} else
   4820 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4821 
   4822 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4823 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4824 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   4825 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4826 		reg |= KABGTXD_BGSQLBIAS;
   4827 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4828 	}
   4829 
   4830 	/* Set up the inter-packet gap. */
   4831 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4832 
   4833 	if (sc->sc_type >= WM_T_82543) {
    4834 		/*
    4835 		 * XXX The 82574 has both ITR and EITR.  Set EITR when we
    4836 		 * use the multiqueue feature with MSI-X.
    4837 		 */
   4838 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4839 			int qidx;
   4840 			for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4841 				struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4842 				CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
   4843 				    sc->sc_itr);
   4844 			}
   4845 			for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4846 				struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4847 				CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
   4848 				    sc->sc_itr);
   4849 			}
    4850 			/*
    4851 			 * Link interrupts occur much less frequently than TX
    4852 			 * and RX interrupts, so we don't tune the
    4853 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    4854 			 * FreeBSD's if_igb does.
    4855 			 */
   4856 		} else
   4857 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4858 	}
   4859 
   4860 	/* Set the VLAN ethernetype. */
   4861 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4862 
   4863 	/*
   4864 	 * Set up the transmit control register; we start out with
    4865 	 * a collision distance suitable for FDX, but update it when
   4866 	 * we resolve the media type.
   4867 	 */
   4868 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4869 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4870 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4871 	if (sc->sc_type >= WM_T_82571)
   4872 		sc->sc_tctl |= TCTL_MULR;
   4873 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4874 
   4875 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    4876 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   4877 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4878 	}
   4879 
   4880 	if (sc->sc_type == WM_T_80003) {
   4881 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4882 		reg &= ~TCTL_EXT_GCEX_MASK;
   4883 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4884 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4885 	}
   4886 
   4887 	/* Set the media. */
   4888 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4889 		goto out;
   4890 
   4891 	/* Configure for OS presence */
   4892 	wm_init_manageability(sc);
   4893 
   4894 	/*
   4895 	 * Set up the receive control register; we actually program
   4896 	 * the register when we set the receive filter.  Use multicast
   4897 	 * address offset type 0.
   4898 	 *
   4899 	 * Only the i82544 has the ability to strip the incoming
   4900 	 * CRC, so we don't enable that feature.
   4901 	 */
   4902 	sc->sc_mchash_type = 0;
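         	/*
         	 * RCTL_RDMTS_1_2 sets the receive descriptor minimum
         	 * threshold at 1/2 of the ring; RCTL_DPF discards pause
         	 * frames.
         	 */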
   4903 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4904 	    | RCTL_MO(sc->sc_mchash_type);
   4905 
   4906 	/*
   4907 	 * The I350 has a bug where it always strips the CRC whether
    4908 	 * asked to or not.  So ask for stripped CRC here and cope in rxeof().
   4909 	 */
   4910 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4911 	    || (sc->sc_type == WM_T_I210))
   4912 		sc->sc_rctl |= RCTL_SECRC;
   4913 
   4914 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4915 	    && (ifp->if_mtu > ETHERMTU)) {
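         		/*
         		 * Accept long packets (LPE); on NEWQUEUE devices RLPML
         		 * caps the accepted packet length.
         		 */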
   4916 		sc->sc_rctl |= RCTL_LPE;
   4917 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4918 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4919 	}
   4920 
   4921 	if (MCLBYTES == 2048) {
   4922 		sc->sc_rctl |= RCTL_2k;
   4923 	} else {
   4924 		if (sc->sc_type >= WM_T_82543) {
   4925 			switch (MCLBYTES) {
   4926 			case 4096:
   4927 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4928 				break;
   4929 			case 8192:
   4930 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4931 				break;
   4932 			case 16384:
   4933 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4934 				break;
   4935 			default:
   4936 				panic("wm_init: MCLBYTES %d unsupported",
   4937 				    MCLBYTES);
   4938 				break;
   4939 			}
   4940 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   4941 	}
   4942 
   4943 	/* Set the receive filter. */
   4944 	wm_set_filter(sc);
   4945 
   4946 	/* Enable ECC */
   4947 	switch (sc->sc_type) {
   4948 	case WM_T_82571:
   4949 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4950 		reg |= PBA_ECC_CORR_EN;
   4951 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4952 		break;
   4953 	case WM_T_PCH_LPT:
   4954 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4955 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4956 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4957 
   4958 		reg = CSR_READ(sc, WMREG_CTRL);
   4959 		reg |= CTRL_MEHE;
   4960 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4961 		break;
   4962 	default:
   4963 		break;
   4964 	}
   4965 
    4966 	/* On 82575 and later, set RDT only after RX is enabled */
   4967 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4968 		int qidx;
   4969 		for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4970 			struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4971 			for (i = 0; i < WM_NRXDESC; i++) {
   4972 				WM_RX_LOCK(rxq);
   4973 				wm_init_rxdesc(rxq, i);
   4974 				WM_RX_UNLOCK(rxq);
   4975 
   4976 			}
   4977 		}
   4978 	}
   4979 
   4980 	sc->sc_stopping = false;
   4981 
   4982 	/* Start the one second link check clock. */
   4983 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4984 
   4985 	/* ...all done! */
   4986 	ifp->if_flags |= IFF_RUNNING;
   4987 	ifp->if_flags &= ~IFF_OACTIVE;
   4988 
   4989  out:
   4990 	sc->sc_if_flags = ifp->if_flags;
   4991 	if (error)
   4992 		log(LOG_ERR, "%s: interface not running\n",
   4993 		    device_xname(sc->sc_dev));
   4994 	return error;
   4995 }
   4996 
   4997 /*
   4998  * wm_stop:		[ifnet interface function]
   4999  *
   5000  *	Stop transmission on the interface.
   5001  */
   5002 static void
   5003 wm_stop(struct ifnet *ifp, int disable)
   5004 {
   5005 	struct wm_softc *sc = ifp->if_softc;
   5006 
   5007 	WM_CORE_LOCK(sc);
   5008 	wm_stop_locked(ifp, disable);
   5009 	WM_CORE_UNLOCK(sc);
   5010 }
   5011 
   5012 static void
   5013 wm_stop_locked(struct ifnet *ifp, int disable)
   5014 {
   5015 	struct wm_softc *sc = ifp->if_softc;
   5016 	struct wm_txsoft *txs;
   5017 	int i, qidx;
   5018 
   5019 	KASSERT(WM_CORE_LOCKED(sc));
   5020 
   5021 	sc->sc_stopping = true;
   5022 
   5023 	/* Stop the one second clock. */
   5024 	callout_stop(&sc->sc_tick_ch);
   5025 
   5026 	/* Stop the 82547 Tx FIFO stall check timer. */
   5027 	if (sc->sc_type == WM_T_82547)
   5028 		callout_stop(&sc->sc_txfifo_ch);
   5029 
   5030 	if (sc->sc_flags & WM_F_HAS_MII) {
   5031 		/* Down the MII. */
   5032 		mii_down(&sc->sc_mii);
   5033 	} else {
   5034 #if 0
   5035 		/* Should we clear PHY's status properly? */
   5036 		wm_reset(sc);
   5037 #endif
   5038 	}
   5039 
   5040 	/* Stop the transmit and receive processes. */
   5041 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5042 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5043 	sc->sc_rctl &= ~RCTL_EN;
   5044 
   5045 	/*
   5046 	 * Clear the interrupt mask to ensure the device cannot assert its
   5047 	 * interrupt line.
   5048 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5049 	 * service any currently pending or shared interrupt.
   5050 	 */
   5051 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5052 	sc->sc_icr = 0;
   5053 	if (sc->sc_nintrs > 1) {
   5054 		if (sc->sc_type != WM_T_82574) {
   5055 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5056 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5057 		} else
   5058 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5059 	}
   5060 
   5061 	/* Release any queued transmit buffers. */
   5062 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   5063 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   5064 		WM_TX_LOCK(txq);
   5065 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5066 			txs = &txq->txq_soft[i];
   5067 			if (txs->txs_mbuf != NULL) {
   5068 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5069 				m_freem(txs->txs_mbuf);
   5070 				txs->txs_mbuf = NULL;
   5071 			}
   5072 		}
   5073 		WM_TX_UNLOCK(txq);
   5074 	}
   5075 
   5076 	/* Mark the interface as down and cancel the watchdog timer. */
   5077 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5078 	ifp->if_timer = 0;
   5079 
   5080 	if (disable) {
   5081 		for (i = 0; i < sc->sc_nrxqueues; i++) {
   5082 			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5083 			WM_RX_LOCK(rxq);
   5084 			wm_rxdrain(rxq);
   5085 			WM_RX_UNLOCK(rxq);
   5086 		}
   5087 	}
   5088 
   5089 #if 0 /* notyet */
   5090 	if (sc->sc_type >= WM_T_82544)
   5091 		CSR_WRITE(sc, WMREG_WUC, 0);
   5092 #endif
   5093 }
   5094 
   5095 static void
   5096 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5097 {
   5098 	struct mbuf *m;
   5099 	int i;
   5100 
   5101 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5102 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5103 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5104 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5105 		    m->m_data, m->m_len, m->m_flags);
   5106 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5107 	    i, i == 1 ? "" : "s");
   5108 }
   5109 
   5110 /*
   5111  * wm_82547_txfifo_stall:
   5112  *
   5113  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5114  *	reset the FIFO pointers, and restart packet transmission.
   5115  */
   5116 static void
   5117 wm_82547_txfifo_stall(void *arg)
   5118 {
   5119 	struct wm_softc *sc = arg;
   5120 	struct wm_txqueue *txq = sc->sc_txq;
   5121 #ifndef WM_MPSAFE
   5122 	int s;
   5123 
   5124 	s = splnet();
   5125 #endif
   5126 	WM_TX_LOCK(txq);
   5127 
   5128 	if (sc->sc_stopping)
   5129 		goto out;
   5130 
   5131 	if (txq->txq_fifo_stall) {
   5132 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5133 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5134 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5135 			/*
   5136 			 * Packets have drained.  Stop transmitter, reset
   5137 			 * FIFO pointers, restart transmitter, and kick
   5138 			 * the packet queue.
   5139 			 */
   5140 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5141 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5142 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5143 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5144 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5145 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5146 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5147 			CSR_WRITE_FLUSH(sc);
   5148 
   5149 			txq->txq_fifo_head = 0;
   5150 			txq->txq_fifo_stall = 0;
   5151 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5152 		} else {
   5153 			/*
   5154 			 * Still waiting for packets to drain; try again in
   5155 			 * another tick.
   5156 			 */
   5157 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5158 		}
   5159 	}
   5160 
   5161 out:
   5162 	WM_TX_UNLOCK(txq);
   5163 #ifndef WM_MPSAFE
   5164 	splx(s);
   5165 #endif
   5166 }
   5167 
   5168 /*
   5169  * wm_82547_txfifo_bugchk:
   5170  *
   5171  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5172  *	prevent enqueueing a packet that would wrap around the end
    5173  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   5174  *
   5175  *	We do this by checking the amount of space before the end
   5176  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5177  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5178  *	the internal FIFO pointers to the beginning, and restart
   5179  *	transmission on the interface.
   5180  */
   5181 #define	WM_FIFO_HDR		0x10
   5182 #define	WM_82547_PAD_LEN	0x3e0
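         /*
          * Illustrative arithmetic: with WM_FIFO_HDR = 0x10, a 1514-byte
          * frame consumes roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO
          * space in the check below.
          */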
   5183 static int
   5184 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5185 {
   5186 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5187 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5188 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5189 
   5190 	/* Just return if already stalled. */
   5191 	if (txq->txq_fifo_stall)
   5192 		return 1;
   5193 
   5194 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5195 		/* Stall only occurs in half-duplex mode. */
   5196 		goto send_packet;
   5197 	}
   5198 
   5199 	if (len >= WM_82547_PAD_LEN + space) {
   5200 		txq->txq_fifo_stall = 1;
   5201 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5202 		return 1;
   5203 	}
   5204 
   5205  send_packet:
   5206 	txq->txq_fifo_head += len;
   5207 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5208 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5209 
   5210 	return 0;
   5211 }
   5212 
   5213 static int
   5214 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5215 {
   5216 	int error;
   5217 
   5218 	/*
   5219 	 * Allocate the control data structures, and create and load the
   5220 	 * DMA map for it.
   5221 	 *
   5222 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5223 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5224 	 * both sets within the same 4G segment.
   5225 	 */
   5226 	if (sc->sc_type < WM_T_82544) {
   5227 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5228 		txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
   5229 	} else {
   5230 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5231 		txq->txq_desc_size = sizeof(txdescs_t);
   5232 	}
   5233 
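         	/*
         	 * The 0x100000000ULL "boundary" argument below is what
         	 * enforces the 4G constraint described above.
         	 */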
   5234 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size, PAGE_SIZE,
   5235 		    (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 1,
   5236 		    &txq->txq_desc_rseg, 0)) != 0) {
   5237 		aprint_error_dev(sc->sc_dev,
   5238 		    "unable to allocate TX control data, error = %d\n",
   5239 		    error);
   5240 		goto fail_0;
   5241 	}
   5242 
   5243 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5244 		    txq->txq_desc_rseg, txq->txq_desc_size,
   5245 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5246 		aprint_error_dev(sc->sc_dev,
   5247 		    "unable to map TX control data, error = %d\n", error);
   5248 		goto fail_1;
   5249 	}
   5250 
   5251 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
   5252 		    txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5253 		aprint_error_dev(sc->sc_dev,
   5254 		    "unable to create TX control data DMA map, error = %d\n",
   5255 		    error);
   5256 		goto fail_2;
   5257 	}
   5258 
   5259 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5260 		    txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
   5261 		aprint_error_dev(sc->sc_dev,
   5262 		    "unable to load TX control data DMA map, error = %d\n",
   5263 		    error);
   5264 		goto fail_3;
   5265 	}
   5266 
   5267 	return 0;
   5268 
   5269  fail_3:
   5270 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5271  fail_2:
   5272 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5273 	    txq->txq_desc_size);
   5274  fail_1:
   5275 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5276  fail_0:
   5277 	return error;
   5278 }
   5279 
   5280 static void
   5281 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5282 {
   5283 
   5284 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5285 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5286 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5287 	    txq->txq_desc_size);
   5288 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5289 }
   5290 
   5291 static int
   5292 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5293 {
   5294 	int error;
   5295 
   5296 	/*
   5297 	 * Allocate the control data structures, and create and load the
   5298 	 * DMA map for it.
   5299 	 *
   5300 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5301 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5302 	 * both sets within the same 4G segment.
   5303 	 */
   5304 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5305 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE,
   5306 		    (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1,
   5307 		    &rxq->rxq_desc_rseg, 0)) != 0) {
   5308 		aprint_error_dev(sc->sc_dev,
   5309 		    "unable to allocate RX control data, error = %d\n",
   5310 		    error);
   5311 		goto fail_0;
   5312 	}
   5313 
   5314 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5315 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5316 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5317 		aprint_error_dev(sc->sc_dev,
   5318 		    "unable to map RX control data, error = %d\n", error);
   5319 		goto fail_1;
   5320 	}
   5321 
   5322 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5323 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5324 		aprint_error_dev(sc->sc_dev,
   5325 		    "unable to create RX control data DMA map, error = %d\n",
   5326 		    error);
   5327 		goto fail_2;
   5328 	}
   5329 
   5330 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5331 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5332 		aprint_error_dev(sc->sc_dev,
   5333 		    "unable to load RX control data DMA map, error = %d\n",
   5334 		    error);
   5335 		goto fail_3;
   5336 	}
   5337 
   5338 	return 0;
   5339 
   5340  fail_3:
   5341 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5342  fail_2:
   5343 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5344 	    rxq->rxq_desc_size);
   5345  fail_1:
   5346 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5347  fail_0:
   5348 	return error;
   5349 }
   5350 
   5351 static void
   5352 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5353 {
   5354 
   5355 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5356 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5357 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5358 	    rxq->rxq_desc_size);
   5359 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5360 }
   5361 
   5362 
   5363 static int
   5364 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5365 {
   5366 	int i, error;
   5367 
   5368 	/* Create the transmit buffer DMA maps. */
   5369 	WM_TXQUEUELEN(txq) =
   5370 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5371 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5372 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5373 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5374 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5375 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5376 			aprint_error_dev(sc->sc_dev,
   5377 			    "unable to create Tx DMA map %d, error = %d\n",
   5378 			    i, error);
   5379 			goto fail;
   5380 		}
   5381 	}
   5382 
   5383 	return 0;
   5384 
   5385  fail:
   5386 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5387 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5388 			bus_dmamap_destroy(sc->sc_dmat,
   5389 			    txq->txq_soft[i].txs_dmamap);
   5390 	}
   5391 	return error;
   5392 }
   5393 
   5394 static void
   5395 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5396 {
   5397 	int i;
   5398 
   5399 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5400 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5401 			bus_dmamap_destroy(sc->sc_dmat,
   5402 			    txq->txq_soft[i].txs_dmamap);
   5403 	}
   5404 }
   5405 
   5406 static int
   5407 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5408 {
   5409 	int i, error;
   5410 
   5411 	/* Create the receive buffer DMA maps. */
   5412 	for (i = 0; i < WM_NRXDESC; i++) {
   5413 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5414 			    MCLBYTES, 0, 0,
   5415 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5416 			aprint_error_dev(sc->sc_dev,
   5417 			    "unable to create Rx DMA map %d error = %d\n",
   5418 			    i, error);
   5419 			goto fail;
   5420 		}
   5421 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5422 	}
   5423 
   5424 	return 0;
   5425 
   5426  fail:
   5427 	for (i = 0; i < WM_NRXDESC; i++) {
   5428 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5429 			bus_dmamap_destroy(sc->sc_dmat,
   5430 			    rxq->rxq_soft[i].rxs_dmamap);
   5431 	}
   5432 	return error;
   5433 }
   5434 
   5435 static void
   5436 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5437 {
   5438 	int i;
   5439 
   5440 	for (i = 0; i < WM_NRXDESC; i++) {
   5441 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5442 			bus_dmamap_destroy(sc->sc_dmat,
   5443 			    rxq->rxq_soft[i].rxs_dmamap);
   5444 	}
   5445 }
   5446 
   5447 /*
    5448  * wm_alloc_txrx_queues:
    5449  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   5450  */
   5451 static int
   5452 wm_alloc_txrx_queues(struct wm_softc *sc)
   5453 {
   5454 	int i, error, tx_done, rx_done;
   5455 
   5456 	/*
   5457 	 * For transmission
   5458 	 */
   5459 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
   5460 	    KM_SLEEP);
   5461 	if (sc->sc_txq == NULL) {
   5462 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
   5463 		error = ENOMEM;
   5464 		goto fail_0;
   5465 	}
   5466 
   5467 	error = 0;
   5468 	tx_done = 0;
   5469 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5470 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5471 		txq->txq_sc = sc;
   5472 #ifdef WM_MPSAFE
   5473 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5474 #else
   5475 		txq->txq_lock = NULL;
   5476 #endif
   5477 		error = wm_alloc_tx_descs(sc, txq);
   5478 		if (error)
   5479 			break;
   5480 		error = wm_alloc_tx_buffer(sc, txq);
   5481 		if (error) {
   5482 			wm_free_tx_descs(sc, txq);
   5483 			break;
   5484 		}
   5485 		tx_done++;
   5486 	}
   5487 	if (error)
   5488 		goto fail_1;
   5489 
   5490 	/*
    5491 	 * For receive
   5492 	 */
   5493 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
   5494 	    KM_SLEEP);
   5495 	if (sc->sc_rxq == NULL) {
   5496 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
   5497 		error = ENOMEM;
   5498 		goto fail_1;
   5499 	}
   5500 
   5501 	error = 0;
   5502 	rx_done = 0;
   5503 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5504 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5505 		rxq->rxq_sc = sc;
   5506 #ifdef WM_MPSAFE
   5507 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5508 #else
   5509 		rxq->rxq_lock = NULL;
   5510 #endif
   5511 		error = wm_alloc_rx_descs(sc, rxq);
   5512 		if (error)
   5513 			break;
   5514 
   5515 		error = wm_alloc_rx_buffer(sc, rxq);
   5516 		if (error) {
   5517 			wm_free_rx_descs(sc, rxq);
   5518 			break;
   5519 		}
   5520 
   5521 		rx_done++;
   5522 	}
   5523 	if (error)
   5524 		goto fail_2;
   5525 
   5526 	return 0;
   5527 
   5528  fail_2:
   5529 	for (i = 0; i < rx_done; i++) {
   5530 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5531 		wm_free_rx_buffer(sc, rxq);
   5532 		wm_free_rx_descs(sc, rxq);
   5533 		if (rxq->rxq_lock)
   5534 			mutex_obj_free(rxq->rxq_lock);
   5535 	}
   5536 	kmem_free(sc->sc_rxq,
   5537 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5538  fail_1:
   5539 	for (i = 0; i < tx_done; i++) {
   5540 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5541 		wm_free_tx_buffer(sc, txq);
   5542 		wm_free_tx_descs(sc, txq);
   5543 		if (txq->txq_lock)
   5544 			mutex_obj_free(txq->txq_lock);
   5545 	}
   5546 	kmem_free(sc->sc_txq,
   5547 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5548  fail_0:
   5549 	return error;
   5550 }
   5551 
   5552 /*
    5553  * wm_free_txrx_queues:
    5554  *	Free {tx,rx} descriptors and {tx,rx} buffers
   5555  */
   5556 static void
   5557 wm_free_txrx_queues(struct wm_softc *sc)
   5558 {
   5559 	int i;
   5560 
   5561 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5562 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5563 		wm_free_rx_buffer(sc, rxq);
   5564 		wm_free_rx_descs(sc, rxq);
   5565 		if (rxq->rxq_lock)
   5566 			mutex_obj_free(rxq->rxq_lock);
   5567 	}
   5568 	kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5569 
   5570 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5571 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5572 		wm_free_tx_buffer(sc, txq);
   5573 		wm_free_tx_descs(sc, txq);
   5574 		if (txq->txq_lock)
   5575 			mutex_obj_free(txq->txq_lock);
   5576 	}
   5577 	kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5578 }
   5579 
   5580 static void
   5581 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5582 {
   5583 
   5584 	KASSERT(WM_TX_LOCKED(txq));
   5585 
   5586 	/* Initialize the transmit descriptor ring. */
   5587 	memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
   5588 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5589 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5590 	txq->txq_free = WM_NTXDESC(txq);
   5591 	txq->txq_next = 0;
   5592 }
   5593 
   5594 static void
   5595 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
   5596 {
   5597 
   5598 	KASSERT(WM_TX_LOCKED(txq));
   5599 
   5600 	if (sc->sc_type < WM_T_82543) {
   5601 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5602 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5603 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
   5604 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5605 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5606 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5607 	} else {
   5608 		int qid = txq->txq_id;
   5609 
   5610 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5611 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5612 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
   5613 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5614 
   5615 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5616 			/*
   5617 			 * Don't write TDT before TCTL.EN is set.
    5618 			 * See the documentation.
   5619 			 */
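         			/*
         			 * PTHRESH/HTHRESH/WTHRESH are the descriptor
         			 * prefetch, host and write-back thresholds.
         			 */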
   5620 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5621 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5622 			    | TXDCTL_WTHRESH(0));
   5623 		else {
   5624 			/* ITR / 4 */
   5625 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5626 			if (sc->sc_type >= WM_T_82540) {
    5627 				/* Should be the same value as TIDV. */
   5628 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5629 			}
   5630 
   5631 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5632 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5633 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5634 		}
   5635 	}
   5636 }
   5637 
   5638 static void
   5639 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5640 {
   5641 	int i;
   5642 
   5643 	KASSERT(WM_TX_LOCKED(txq));
   5644 
   5645 	/* Initialize the transmit job descriptors. */
   5646 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5647 		txq->txq_soft[i].txs_mbuf = NULL;
   5648 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5649 	txq->txq_snext = 0;
   5650 	txq->txq_sdirty = 0;
   5651 }
   5652 
   5653 static void
   5654 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
   5655 {
   5656 
   5657 	KASSERT(WM_TX_LOCKED(txq));
   5658 
   5659 	/*
   5660 	 * Set up some register offsets that are different between
   5661 	 * the i82542 and the i82543 and later chips.
   5662 	 */
   5663 	if (sc->sc_type < WM_T_82543) {
   5664 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5665 	} else {
   5666 		txq->txq_tdt_reg = WMREG_TDT(0);
   5667 	}
   5668 
   5669 	wm_init_tx_descs(sc, txq);
   5670 	wm_init_tx_regs(sc, txq);
   5671 	wm_init_tx_buffer(sc, txq);
   5672 }
   5673 
   5674 static void
   5675 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5676 {
   5677 
   5678 	KASSERT(WM_RX_LOCKED(rxq));
   5679 
   5680 	/*
   5681 	 * Initialize the receive descriptor and receive job
   5682 	 * descriptor rings.
   5683 	 */
   5684 	if (sc->sc_type < WM_T_82543) {
   5685 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5686 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5687 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5688 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5689 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5690 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5691 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5692 
   5693 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5694 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5695 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5696 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5697 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5698 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5699 	} else {
   5700 		int qid = rxq->rxq_id;
   5701 
   5702 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5703 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5704 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5705 
   5706 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
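         			/*
         			 * SRRCTL.BSIZEPKT is expressed in units of
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so
         			 * MCLBYTES must be a multiple of that unit.
         			 */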
   5707 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5708 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
   5709 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5710 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5711 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5712 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5713 			    | RXDCTL_WTHRESH(1));
   5714 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5715 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5716 		} else {
   5717 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5718 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5719 			/* ITR / 4 */
   5720 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    5721 			/* MUST be the same value as RDTR. */
   5722 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5723 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5724 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5725 		}
   5726 	}
   5727 }
   5728 
   5729 static int
   5730 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5731 {
   5732 	struct wm_rxsoft *rxs;
   5733 	int error, i;
   5734 
   5735 	KASSERT(WM_RX_LOCKED(rxq));
   5736 
   5737 	for (i = 0; i < WM_NRXDESC; i++) {
   5738 		rxs = &rxq->rxq_soft[i];
   5739 		if (rxs->rxs_mbuf == NULL) {
   5740 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5741 				log(LOG_ERR, "%s: unable to allocate or map "
   5742 				    "rx buffer %d, error = %d\n",
   5743 				    device_xname(sc->sc_dev), i, error);
   5744 				/*
   5745 				 * XXX Should attempt to run with fewer receive
   5746 				 * XXX buffers instead of just failing.
   5747 				 */
   5748 				wm_rxdrain(rxq);
   5749 				return ENOMEM;
   5750 			}
   5751 		} else {
   5752 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5753 				wm_init_rxdesc(rxq, i);
   5754 			/*
    5755 			 * For 82575 and newer devices, the RX descriptors
    5756 			 * must be initialized after RCTL.EN is set in
    5757 			 * wm_set_filter().
   5758 			 */
   5759 		}
   5760 	}
   5761 	rxq->rxq_ptr = 0;
   5762 	rxq->rxq_discard = 0;
   5763 	WM_RXCHAIN_RESET(rxq);
   5764 
   5765 	return 0;
   5766 }
   5767 
   5768 static int
   5769 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5770 {
   5771 
   5772 	KASSERT(WM_RX_LOCKED(rxq));
   5773 
   5774 	/*
   5775 	 * Set up some register offsets that are different between
   5776 	 * the i82542 and the i82543 and later chips.
   5777 	 */
   5778 	if (sc->sc_type < WM_T_82543) {
   5779 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5780 	} else {
   5781 		rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
   5782 	}
   5783 
   5784 	wm_init_rx_regs(sc, rxq);
   5785 	return wm_init_rx_buffer(sc, rxq);
   5786 }
   5787 
   5788 /*
    5789  * wm_init_txrx_queues:
    5790  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   5791  */
   5792 static int
   5793 wm_init_txrx_queues(struct wm_softc *sc)
   5794 {
   5795 	int i, error;
   5796 
   5797 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5798 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5799 		WM_TX_LOCK(txq);
   5800 		wm_init_tx_queue(sc, txq);
   5801 		WM_TX_UNLOCK(txq);
   5802 	}
   5803 
   5804 	error = 0;
   5805 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5806 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5807 		WM_RX_LOCK(rxq);
   5808 		error = wm_init_rx_queue(sc, rxq);
   5809 		WM_RX_UNLOCK(rxq);
   5810 		if (error)
   5811 			break;
   5812 	}
   5813 
   5814 	return error;
   5815 }
   5816 
   5817 /*
   5818  * wm_tx_offload:
   5819  *
   5820  *	Set up TCP/IP checksumming parameters for the
   5821  *	specified packet.
   5822  */
   5823 static int
   5824 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5825     uint8_t *fieldsp)
   5826 {
   5827 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5828 	struct mbuf *m0 = txs->txs_mbuf;
   5829 	struct livengood_tcpip_ctxdesc *t;
   5830 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5831 	uint32_t ipcse;
   5832 	struct ether_header *eh;
   5833 	int offset, iphl;
   5834 	uint8_t fields;
   5835 
   5836 	/*
   5837 	 * XXX It would be nice if the mbuf pkthdr had offset
   5838 	 * fields for the protocol headers.
   5839 	 */
   5840 
   5841 	eh = mtod(m0, struct ether_header *);
   5842 	switch (htons(eh->ether_type)) {
   5843 	case ETHERTYPE_IP:
   5844 	case ETHERTYPE_IPV6:
   5845 		offset = ETHER_HDR_LEN;
   5846 		break;
   5847 
   5848 	case ETHERTYPE_VLAN:
   5849 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5850 		break;
   5851 
   5852 	default:
   5853 		/*
   5854 		 * Don't support this protocol or encapsulation.
   5855 		 */
   5856 		*fieldsp = 0;
   5857 		*cmdp = 0;
   5858 		return 0;
   5859 	}
   5860 
   5861 	if ((m0->m_pkthdr.csum_flags &
   5862 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   5863 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5864 	} else {
   5865 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5866 	}
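         	/* IPCSE is the inclusive offset of the last byte of the IP header. */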
   5867 	ipcse = offset + iphl - 1;
   5868 
   5869 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   5870 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   5871 	seg = 0;
   5872 	fields = 0;
   5873 
   5874 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5875 		int hlen = offset + iphl;
   5876 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5877 
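         		/*
         		 * For TSO, seed th_sum with the pseudo-header checksum
         		 * computed with a zero length field; the hardware then
         		 * fills in the per-segment lengths.
         		 */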
   5878 		if (__predict_false(m0->m_len <
   5879 				    (hlen + sizeof(struct tcphdr)))) {
   5880 			/*
   5881 			 * TCP/IP headers are not in the first mbuf; we need
   5882 			 * to do this the slow and painful way.  Let's just
   5883 			 * hope this doesn't happen very often.
   5884 			 */
   5885 			struct tcphdr th;
   5886 
   5887 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   5888 
   5889 			m_copydata(m0, hlen, sizeof(th), &th);
   5890 			if (v4) {
   5891 				struct ip ip;
   5892 
   5893 				m_copydata(m0, offset, sizeof(ip), &ip);
   5894 				ip.ip_len = 0;
   5895 				m_copyback(m0,
   5896 				    offset + offsetof(struct ip, ip_len),
   5897 				    sizeof(ip.ip_len), &ip.ip_len);
   5898 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5899 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5900 			} else {
   5901 				struct ip6_hdr ip6;
   5902 
   5903 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   5904 				ip6.ip6_plen = 0;
   5905 				m_copyback(m0,
   5906 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   5907 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   5908 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   5909 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   5910 			}
   5911 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   5912 			    sizeof(th.th_sum), &th.th_sum);
   5913 
   5914 			hlen += th.th_off << 2;
   5915 		} else {
   5916 			/*
   5917 			 * TCP/IP headers are in the first mbuf; we can do
   5918 			 * this the easy way.
   5919 			 */
   5920 			struct tcphdr *th;
   5921 
   5922 			if (v4) {
   5923 				struct ip *ip =
   5924 				    (void *)(mtod(m0, char *) + offset);
   5925 				th = (void *)(mtod(m0, char *) + hlen);
   5926 
   5927 				ip->ip_len = 0;
   5928 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   5929 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   5930 			} else {
   5931 				struct ip6_hdr *ip6 =
   5932 				    (void *)(mtod(m0, char *) + offset);
   5933 				th = (void *)(mtod(m0, char *) + hlen);
   5934 
   5935 				ip6->ip6_plen = 0;
   5936 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   5937 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   5938 			}
   5939 			hlen += th->th_off << 2;
   5940 		}
   5941 
   5942 		if (v4) {
   5943 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   5944 			cmdlen |= WTX_TCPIP_CMD_IP;
   5945 		} else {
   5946 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   5947 			ipcse = 0;
   5948 		}
   5949 		cmd |= WTX_TCPIP_CMD_TSE;
   5950 		cmdlen |= WTX_TCPIP_CMD_TSE |
   5951 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   5952 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   5953 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   5954 	}
   5955 
   5956 	/*
   5957 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   5958 	 * offload feature, if we load the context descriptor, we
   5959 	 * MUST provide valid values for IPCSS and TUCSS fields.
   5960 	 */
   5961 
   5962 	ipcs = WTX_TCPIP_IPCSS(offset) |
   5963 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   5964 	    WTX_TCPIP_IPCSE(ipcse);
   5965 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   5966 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   5967 		fields |= WTX_IXSM;
   5968 	}
   5969 
   5970 	offset += iphl;
   5971 
   5972 	if (m0->m_pkthdr.csum_flags &
   5973 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   5974 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   5975 		fields |= WTX_TXSM;
   5976 		tucs = WTX_TCPIP_TUCSS(offset) |
   5977 		    WTX_TCPIP_TUCSO(offset +
   5978 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   5979 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5980 	} else if ((m0->m_pkthdr.csum_flags &
   5981 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   5982 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   5983 		fields |= WTX_TXSM;
   5984 		tucs = WTX_TCPIP_TUCSS(offset) |
   5985 		    WTX_TCPIP_TUCSO(offset +
   5986 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   5987 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5988 	} else {
   5989 		/* Just initialize it to a valid TCP context. */
   5990 		tucs = WTX_TCPIP_TUCSS(offset) |
   5991 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   5992 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5993 	}
   5994 
   5995 	/* Fill in the context descriptor. */
   5996 	t = (struct livengood_tcpip_ctxdesc *)
   5997 	    &txq->txq_descs[txq->txq_next];
   5998 	t->tcpip_ipcs = htole32(ipcs);
   5999 	t->tcpip_tucs = htole32(tucs);
   6000 	t->tcpip_cmdlen = htole32(cmdlen);
   6001 	t->tcpip_seg = htole32(seg);
   6002 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6003 
   6004 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6005 	txs->txs_ndesc++;
   6006 
   6007 	*cmdp = cmd;
   6008 	*fieldsp = fields;
   6009 
   6010 	return 0;
   6011 }
   6012 
   6013 /*
   6014  * wm_start:		[ifnet interface function]
   6015  *
   6016  *	Start packet transmission on the interface.
   6017  */
   6018 static void
   6019 wm_start(struct ifnet *ifp)
   6020 {
   6021 	struct wm_softc *sc = ifp->if_softc;
   6022 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6023 
   6024 	WM_TX_LOCK(txq);
   6025 	if (!sc->sc_stopping)
   6026 		wm_start_locked(ifp);
   6027 	WM_TX_UNLOCK(txq);
   6028 }
   6029 
   6030 static void
   6031 wm_start_locked(struct ifnet *ifp)
   6032 {
   6033 	struct wm_softc *sc = ifp->if_softc;
   6034 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6035 	struct mbuf *m0;
   6036 	struct m_tag *mtag;
   6037 	struct wm_txsoft *txs;
   6038 	bus_dmamap_t dmamap;
   6039 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6040 	bus_addr_t curaddr;
   6041 	bus_size_t seglen, curlen;
   6042 	uint32_t cksumcmd;
   6043 	uint8_t cksumfields;
   6044 
   6045 	KASSERT(WM_TX_LOCKED(txq));
   6046 
   6047 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   6048 		return;
   6049 
   6050 	/* Remember the previous number of free descriptors. */
   6051 	ofree = txq->txq_free;
   6052 
   6053 	/*
   6054 	 * Loop through the send queue, setting up transmit descriptors
   6055 	 * until we drain the queue, or use up all available transmit
   6056 	 * descriptors.
   6057 	 */
   6058 	for (;;) {
   6059 		m0 = NULL;
   6060 
   6061 		/* Get a work queue entry. */
   6062 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6063 			wm_txeof(sc);
   6064 			if (txq->txq_sfree == 0) {
   6065 				DPRINTF(WM_DEBUG_TX,
   6066 				    ("%s: TX: no free job descriptors\n",
   6067 					device_xname(sc->sc_dev)));
   6068 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6069 				break;
   6070 			}
   6071 		}
   6072 
   6073 		/* Grab a packet off the queue. */
   6074 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6075 		if (m0 == NULL)
   6076 			break;
   6077 
   6078 		DPRINTF(WM_DEBUG_TX,
   6079 		    ("%s: TX: have packet to transmit: %p\n",
   6080 		    device_xname(sc->sc_dev), m0));
   6081 
   6082 		txs = &txq->txq_soft[txq->txq_snext];
   6083 		dmamap = txs->txs_dmamap;
   6084 
   6085 		use_tso = (m0->m_pkthdr.csum_flags &
   6086 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6087 
   6088 		/*
   6089 		 * So says the Linux driver:
   6090 		 * The controller does a simple calculation to make sure
   6091 		 * there is enough room in the FIFO before initiating the
   6092 		 * DMA for each buffer.  The calc is:
   6093 		 *	4 = ceil(buffer len / MSS)
   6094 		 * To make sure we don't overrun the FIFO, adjust the max
   6095 		 * buffer len if the MSS drops.
   6096 		 */
   6097 		dmamap->dm_maxsegsz =
   6098 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6099 		    ? m0->m_pkthdr.segsz << 2
   6100 		    : WTX_MAX_LEN;
   6101 
   6102 		/*
   6103 		 * Load the DMA map.  If this fails, the packet either
   6104 		 * didn't fit in the allotted number of segments, or we
   6105 		 * were short on resources.  For the too-many-segments
   6106 		 * case, we simply report an error and drop the packet,
   6107 		 * since we can't sanely copy a jumbo packet to a single
   6108 		 * buffer.
   6109 		 */
   6110 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6111 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   6112 		if (error) {
   6113 			if (error == EFBIG) {
   6114 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6115 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6116 				    "DMA segments, dropping...\n",
   6117 				    device_xname(sc->sc_dev));
   6118 				wm_dump_mbuf_chain(sc, m0);
   6119 				m_freem(m0);
   6120 				continue;
   6121 			}
   6122 			/*  Short on resources, just stop for now. */
   6123 			DPRINTF(WM_DEBUG_TX,
   6124 			    ("%s: TX: dmamap load failed: %d\n",
   6125 			    device_xname(sc->sc_dev), error));
   6126 			break;
   6127 		}
   6128 
   6129 		segs_needed = dmamap->dm_nsegs;
   6130 		if (use_tso) {
   6131 			/* For sentinel descriptor; see below. */
   6132 			segs_needed++;
   6133 		}
   6134 
   6135 		/*
   6136 		 * Ensure we have enough descriptors free to describe
   6137 		 * the packet.  Note, we always reserve one descriptor
   6138 		 * at the end of the ring due to the semantics of the
   6139 		 * TDT register, plus one more in the event we need
   6140 		 * to load offload context.
   6141 		 */
   6142 		if (segs_needed > txq->txq_free - 2) {
   6143 			/*
   6144 			 * Not enough free descriptors to transmit this
   6145 			 * packet.  We haven't committed anything yet,
   6146 			 * so just unload the DMA map, put the packet
   6147 			 * pack on the queue, and punt.  Notify the upper
    6148 			 * back on the queue, and punt.  Notify the upper
   6149 			 */
   6150 			DPRINTF(WM_DEBUG_TX,
   6151 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6152 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6153 			    segs_needed, txq->txq_free - 1));
   6154 			ifp->if_flags |= IFF_OACTIVE;
   6155 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6156 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6157 			break;
   6158 		}
   6159 
   6160 		/*
   6161 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6162 		 * once we know we can transmit the packet, since we
   6163 		 * do some internal FIFO space accounting here.
   6164 		 */
   6165 		if (sc->sc_type == WM_T_82547 &&
   6166 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6167 			DPRINTF(WM_DEBUG_TX,
   6168 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6169 			    device_xname(sc->sc_dev)));
   6170 			ifp->if_flags |= IFF_OACTIVE;
   6171 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6172 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   6173 			break;
   6174 		}
   6175 
   6176 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6177 
   6178 		DPRINTF(WM_DEBUG_TX,
   6179 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6180 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6181 
   6182 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6183 
   6184 		/*
   6185 		 * Store a pointer to the packet so that we can free it
   6186 		 * later.
   6187 		 *
   6188 		 * Initially, we consider the number of descriptors the
    6189 		 * packet uses to be the number of DMA segments.  This may be
   6190 		 * incremented by 1 if we do checksum offload (a descriptor
   6191 		 * is used to set the checksum context).
   6192 		 */
   6193 		txs->txs_mbuf = m0;
   6194 		txs->txs_firstdesc = txq->txq_next;
   6195 		txs->txs_ndesc = segs_needed;
   6196 
   6197 		/* Set up offload parameters for this packet. */
   6198 		if (m0->m_pkthdr.csum_flags &
   6199 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   6200 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   6201 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   6202 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6203 					  &cksumfields) != 0) {
   6204 				/* Error message already displayed. */
   6205 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6206 				continue;
   6207 			}
   6208 		} else {
   6209 			cksumcmd = 0;
   6210 			cksumfields = 0;
   6211 		}
   6212 
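         		/* Delay the interrupt (IDE) and insert the FCS (IFCS). */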
   6213 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6214 
   6215 		/* Sync the DMA map. */
   6216 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6217 		    BUS_DMASYNC_PREWRITE);
   6218 
   6219 		/* Initialize the transmit descriptor. */
   6220 		for (nexttx = txq->txq_next, seg = 0;
   6221 		     seg < dmamap->dm_nsegs; seg++) {
   6222 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6223 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6224 			     seglen != 0;
   6225 			     curaddr += curlen, seglen -= curlen,
   6226 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6227 				curlen = seglen;
   6228 
   6229 				/*
   6230 				 * So says the Linux driver:
   6231 				 * Work around for premature descriptor
   6232 				 * write-backs in TSO mode.  Append a
   6233 				 * 4-byte sentinel descriptor.
   6234 				 */
   6235 				if (use_tso &&
   6236 				    seg == dmamap->dm_nsegs - 1 &&
   6237 				    curlen > 8)
   6238 					curlen -= 4;
   6239 
   6240 				wm_set_dma_addr(
   6241 				    &txq->txq_descs[nexttx].wtx_addr,
   6242 				    curaddr);
   6243 				txq->txq_descs[nexttx].wtx_cmdlen =
   6244 				    htole32(cksumcmd | curlen);
   6245 				txq->txq_descs[nexttx].wtx_fields.wtxu_status =
   6246 				    0;
   6247 				txq->txq_descs[nexttx].wtx_fields.wtxu_options =
   6248 				    cksumfields;
   6249 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6250 				lasttx = nexttx;
   6251 
   6252 				DPRINTF(WM_DEBUG_TX,
   6253 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6254 				     "len %#04zx\n",
   6255 				    device_xname(sc->sc_dev), nexttx,
   6256 				    (uint64_t)curaddr, curlen));
   6257 			}
   6258 		}
   6259 
   6260 		KASSERT(lasttx != -1);
   6261 
   6262 		/*
   6263 		 * Set up the command byte on the last descriptor of
   6264 		 * the packet.  If we're in the interrupt delay window,
   6265 		 * delay the interrupt.
   6266 		 */
   6267 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6268 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6269 
   6270 		/*
   6271 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6272 		 * up the descriptor to encapsulate the packet for us.
   6273 		 *
   6274 		 * This is only valid on the last descriptor of the packet.
   6275 		 */
   6276 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6277 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6278 			    htole32(WTX_CMD_VLE);
   6279 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6280 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6281 		}
   6282 
   6283 		txs->txs_lastdesc = lasttx;
   6284 
   6285 		DPRINTF(WM_DEBUG_TX,
   6286 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6287 		    device_xname(sc->sc_dev),
   6288 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6289 
   6290 		/* Sync the descriptors we're using. */
   6291 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6292 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   6293 
   6294 		/* Give the packet to the chip. */
   6295 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6296 
   6297 		DPRINTF(WM_DEBUG_TX,
   6298 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6299 
   6300 		DPRINTF(WM_DEBUG_TX,
   6301 		    ("%s: TX: finished transmitting packet, job %d\n",
   6302 		    device_xname(sc->sc_dev), txq->txq_snext));
   6303 
   6304 		/* Advance the tx pointer. */
   6305 		txq->txq_free -= txs->txs_ndesc;
   6306 		txq->txq_next = nexttx;
   6307 
   6308 		txq->txq_sfree--;
   6309 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6310 
   6311 		/* Pass the packet to any BPF listeners. */
   6312 		bpf_mtap(ifp, m0);
   6313 	}
   6314 
   6315 	if (m0 != NULL) {
   6316 		ifp->if_flags |= IFF_OACTIVE;
   6317 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6318 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   6319 		m_freem(m0);
   6320 	}
   6321 
   6322 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6323 		/* No more slots; notify upper layer. */
   6324 		ifp->if_flags |= IFF_OACTIVE;
   6325 	}
   6326 
   6327 	if (txq->txq_free != ofree) {
   6328 		/* Set a watchdog timer in case the chip flakes out. */
   6329 		ifp->if_timer = 5;
   6330 	}
   6331 }
   6332 
   6333 /*
   6334  * wm_nq_tx_offload:
   6335  *
   6336  *	Set up TCP/IP checksumming parameters for the
   6337  *	specified packet, for NEWQUEUE devices
   6338  */
   6339 static int
   6340 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   6341     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6342 {
   6343 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6344 	struct mbuf *m0 = txs->txs_mbuf;
   6345 	struct m_tag *mtag;
   6346 	uint32_t vl_len, mssidx, cmdc;
   6347 	struct ether_header *eh;
   6348 	int offset, iphl;
   6349 
   6350 	/*
   6351 	 * XXX It would be nice if the mbuf pkthdr had offset
   6352 	 * fields for the protocol headers.
   6353 	 */
   6354 	*cmdlenp = 0;
   6355 	*fieldsp = 0;
   6356 
   6357 	eh = mtod(m0, struct ether_header *);
    6358 	switch (ntohs(eh->ether_type)) {
   6359 	case ETHERTYPE_IP:
   6360 	case ETHERTYPE_IPV6:
   6361 		offset = ETHER_HDR_LEN;
   6362 		break;
   6363 
   6364 	case ETHERTYPE_VLAN:
   6365 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6366 		break;
   6367 
   6368 	default:
   6369 		/* Don't support this protocol or encapsulation. */
   6370 		*do_csum = false;
   6371 		return 0;
   6372 	}
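	/*
	 * Illustrative note (not in the original): for an untagged frame
	 * the IP header starts ETHER_HDR_LEN (14) bytes into the frame;
	 * a VLAN tag adds ETHER_VLAN_ENCAP_LEN (4), so the tagged case
	 * above uses offset = 14 + 4 = 18.
	 */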
   6373 	*do_csum = true;
   6374 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6375 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6376 
   6377 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6378 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6379 
   6380 	if ((m0->m_pkthdr.csum_flags &
   6381 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   6382 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6383 	} else {
   6384 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6385 	}
   6386 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6387 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6388 
   6389 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6390 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6391 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6392 		*cmdlenp |= NQTX_CMD_VLE;
   6393 	}
   6394 
   6395 	mssidx = 0;
   6396 
   6397 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6398 		int hlen = offset + iphl;
   6399 		int tcp_hlen;
   6400 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6401 
   6402 		if (__predict_false(m0->m_len <
   6403 				    (hlen + sizeof(struct tcphdr)))) {
   6404 			/*
   6405 			 * TCP/IP headers are not in the first mbuf; we need
   6406 			 * to do this the slow and painful way.  Let's just
   6407 			 * hope this doesn't happen very often.
   6408 			 */
   6409 			struct tcphdr th;
   6410 
   6411 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6412 
   6413 			m_copydata(m0, hlen, sizeof(th), &th);
   6414 			if (v4) {
   6415 				struct ip ip;
   6416 
   6417 				m_copydata(m0, offset, sizeof(ip), &ip);
   6418 				ip.ip_len = 0;
   6419 				m_copyback(m0,
   6420 				    offset + offsetof(struct ip, ip_len),
   6421 				    sizeof(ip.ip_len), &ip.ip_len);
   6422 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6423 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6424 			} else {
   6425 				struct ip6_hdr ip6;
   6426 
   6427 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6428 				ip6.ip6_plen = 0;
   6429 				m_copyback(m0,
   6430 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6431 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6432 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6433 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6434 			}
   6435 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6436 			    sizeof(th.th_sum), &th.th_sum);
   6437 
   6438 			tcp_hlen = th.th_off << 2;
   6439 		} else {
   6440 			/*
   6441 			 * TCP/IP headers are in the first mbuf; we can do
   6442 			 * this the easy way.
   6443 			 */
   6444 			struct tcphdr *th;
   6445 
   6446 			if (v4) {
   6447 				struct ip *ip =
   6448 				    (void *)(mtod(m0, char *) + offset);
   6449 				th = (void *)(mtod(m0, char *) + hlen);
   6450 
   6451 				ip->ip_len = 0;
   6452 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6453 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6454 			} else {
   6455 				struct ip6_hdr *ip6 =
   6456 				    (void *)(mtod(m0, char *) + offset);
   6457 				th = (void *)(mtod(m0, char *) + hlen);
   6458 
   6459 				ip6->ip6_plen = 0;
   6460 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6461 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6462 			}
   6463 			tcp_hlen = th->th_off << 2;
   6464 		}
   6465 		hlen += tcp_hlen;
   6466 		*cmdlenp |= NQTX_CMD_TSE;
   6467 
   6468 		if (v4) {
   6469 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6470 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6471 		} else {
   6472 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6473 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6474 		}
   6475 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6476 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6477 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6478 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6479 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6480 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6481 	} else {
   6482 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6483 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6484 	}
   6485 
   6486 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6487 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6488 		cmdc |= NQTXC_CMD_IP4;
   6489 	}
   6490 
   6491 	if (m0->m_pkthdr.csum_flags &
   6492 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6493 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6494 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6495 			cmdc |= NQTXC_CMD_TCP;
   6496 		} else {
   6497 			cmdc |= NQTXC_CMD_UDP;
   6498 		}
   6499 		cmdc |= NQTXC_CMD_IP4;
   6500 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6501 	}
   6502 	if (m0->m_pkthdr.csum_flags &
   6503 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6504 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6505 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6506 			cmdc |= NQTXC_CMD_TCP;
   6507 		} else {
   6508 			cmdc |= NQTXC_CMD_UDP;
   6509 		}
   6510 		cmdc |= NQTXC_CMD_IP6;
   6511 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6512 	}
   6513 
   6514 	/* Fill in the context descriptor. */
   6515 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6516 	    htole32(vl_len);
   6517 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6518 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6519 	    htole32(cmdc);
   6520 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6521 	    htole32(mssidx);
   6522 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6523 	DPRINTF(WM_DEBUG_TX,
   6524 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6525 	    txq->txq_next, 0, vl_len));
   6526 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6527 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6528 	txs->txs_ndesc++;
   6529 	return 0;
   6530 }
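
/*
 * Illustrative sketch (not part of the driver): for TSO the TCP
 * checksum is seeded with a pseudo-header sum that deliberately
 * leaves the length out (ip_len/ip6_plen are zeroed above), because
 * the hardware re-adds the per-segment payload length itself.  A
 * minimal IPv4 flavour of such a sum, in the spirit of the
 * in_cksum_phdr() calls above (byte-order folding details elided;
 * the "example_" name is hypothetical):
 */
#if 0
static uint16_t
example_pseudo_hdr_sum(uint32_t src_n, uint32_t dst_n, uint16_t proto_n)
{
	uint32_t sum;

	/* Sum the 16-bit words of source, destination and protocol. */
	sum = (src_n & 0xffff) + (src_n >> 16)
	    + (dst_n & 0xffff) + (dst_n >> 16)
	    + proto_n;

	/* Fold the carries until the sum fits in 16 bits. */
	while ((sum >> 16) != 0)
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;
}
#endif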
   6531 
   6532 /*
   6533  * wm_nq_start:		[ifnet interface function]
   6534  *
   6535  *	Start packet transmission on the interface for NEWQUEUE devices
   6536  */
   6537 static void
   6538 wm_nq_start(struct ifnet *ifp)
   6539 {
   6540 	struct wm_softc *sc = ifp->if_softc;
   6541 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6542 
   6543 	WM_TX_LOCK(txq);
   6544 	if (!sc->sc_stopping)
   6545 		wm_nq_start_locked(ifp);
   6546 	WM_TX_UNLOCK(txq);
   6547 }
   6548 
   6549 static void
   6550 wm_nq_start_locked(struct ifnet *ifp)
   6551 {
   6552 	struct wm_softc *sc = ifp->if_softc;
   6553 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6554 	struct mbuf *m0;
   6555 	struct m_tag *mtag;
   6556 	struct wm_txsoft *txs;
   6557 	bus_dmamap_t dmamap;
   6558 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6559 	bool do_csum, sent;
   6560 
   6561 	KASSERT(WM_TX_LOCKED(txq));
   6562 
   6563 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   6564 		return;
   6565 
   6566 	sent = false;
   6567 
   6568 	/*
   6569 	 * Loop through the send queue, setting up transmit descriptors
   6570 	 * until we drain the queue, or use up all available transmit
   6571 	 * descriptors.
   6572 	 */
   6573 	for (;;) {
   6574 		m0 = NULL;
   6575 
   6576 		/* Get a work queue entry. */
   6577 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6578 			wm_txeof(sc);
   6579 			if (txq->txq_sfree == 0) {
   6580 				DPRINTF(WM_DEBUG_TX,
   6581 				    ("%s: TX: no free job descriptors\n",
   6582 					device_xname(sc->sc_dev)));
   6583 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6584 				break;
   6585 			}
   6586 		}
   6587 
   6588 		/* Grab a packet off the queue. */
   6589 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6590 		if (m0 == NULL)
   6591 			break;
   6592 
   6593 		DPRINTF(WM_DEBUG_TX,
   6594 		    ("%s: TX: have packet to transmit: %p\n",
   6595 		    device_xname(sc->sc_dev), m0));
   6596 
   6597 		txs = &txq->txq_soft[txq->txq_snext];
   6598 		dmamap = txs->txs_dmamap;
   6599 
   6600 		/*
   6601 		 * Load the DMA map.  If this fails, the packet either
   6602 		 * didn't fit in the allotted number of segments, or we
   6603 		 * were short on resources.  For the too-many-segments
   6604 		 * case, we simply report an error and drop the packet,
   6605 		 * since we can't sanely copy a jumbo packet to a single
   6606 		 * buffer.
   6607 		 */
   6608 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6609 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   6610 		if (error) {
   6611 			if (error == EFBIG) {
   6612 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6613 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6614 				    "DMA segments, dropping...\n",
   6615 				    device_xname(sc->sc_dev));
   6616 				wm_dump_mbuf_chain(sc, m0);
   6617 				m_freem(m0);
   6618 				continue;
   6619 			}
   6620 			/* Short on resources, just stop for now. */
   6621 			DPRINTF(WM_DEBUG_TX,
   6622 			    ("%s: TX: dmamap load failed: %d\n",
   6623 			    device_xname(sc->sc_dev), error));
   6624 			break;
   6625 		}
   6626 
   6627 		segs_needed = dmamap->dm_nsegs;
   6628 
   6629 		/*
   6630 		 * Ensure we have enough descriptors free to describe
   6631 		 * the packet.  Note, we always reserve one descriptor
   6632 		 * at the end of the ring due to the semantics of the
   6633 		 * TDT register, plus one more in the event we need
   6634 		 * to load offload context.
   6635 		 */
   6636 		if (segs_needed > txq->txq_free - 2) {
   6637 			/*
   6638 			 * Not enough free descriptors to transmit this
   6639 			 * packet.  We haven't committed anything yet,
   6640 			 * so just unload the DMA map, put the packet
   6641 			 * pack on the queue, and punt.  Notify the upper
    6642 			 * back on the queue, and punt.  Notify the upper
   6643 			 */
   6644 			DPRINTF(WM_DEBUG_TX,
   6645 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6646 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6647 			    segs_needed, txq->txq_free - 1));
   6648 			ifp->if_flags |= IFF_OACTIVE;
   6649 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6650 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6651 			break;
   6652 		}
   6653 
   6654 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6655 
   6656 		DPRINTF(WM_DEBUG_TX,
   6657 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6658 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6659 
   6660 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6661 
   6662 		/*
   6663 		 * Store a pointer to the packet so that we can free it
   6664 		 * later.
   6665 		 *
   6666 		 * Initially, we consider the number of descriptors the
    6667 		 * packet uses to be the number of DMA segments.  This may
    6668 		 * be incremented by one if we do checksum offload (a
    6669 		 * descriptor is used to set the checksum context).
   6670 		 */
   6671 		txs->txs_mbuf = m0;
   6672 		txs->txs_firstdesc = txq->txq_next;
   6673 		txs->txs_ndesc = segs_needed;
   6674 
   6675 		/* Set up offload parameters for this packet. */
   6676 		uint32_t cmdlen, fields, dcmdlen;
   6677 		if (m0->m_pkthdr.csum_flags &
   6678 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   6679 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   6680 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   6681 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   6682 			    &do_csum) != 0) {
   6683 				/* Error message already displayed. */
   6684 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6685 				continue;
   6686 			}
   6687 		} else {
   6688 			do_csum = false;
   6689 			cmdlen = 0;
   6690 			fields = 0;
   6691 		}
   6692 
   6693 		/* Sync the DMA map. */
   6694 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6695 		    BUS_DMASYNC_PREWRITE);
   6696 
   6697 		/* Initialize the first transmit descriptor. */
   6698 		nexttx = txq->txq_next;
   6699 		if (!do_csum) {
   6700 			/* setup a legacy descriptor */
   6701 			wm_set_dma_addr(
   6702 			    &txq->txq_descs[nexttx].wtx_addr,
   6703 			    dmamap->dm_segs[0].ds_addr);
   6704 			txq->txq_descs[nexttx].wtx_cmdlen =
   6705 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6706 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6707 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6708 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6709 			    NULL) {
   6710 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6711 				    htole32(WTX_CMD_VLE);
   6712 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6713 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6714 			} else {
    6715 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6716 			}
   6717 			dcmdlen = 0;
   6718 		} else {
   6719 			/* setup an advanced data descriptor */
   6720 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6721 			    htole64(dmamap->dm_segs[0].ds_addr);
   6722 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6723 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6724 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6725 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6726 			    htole32(fields);
   6727 			DPRINTF(WM_DEBUG_TX,
   6728 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6729 			    device_xname(sc->sc_dev), nexttx,
   6730 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6731 			DPRINTF(WM_DEBUG_TX,
   6732 			    ("\t 0x%08x%08x\n", fields,
   6733 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6734 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6735 		}
   6736 
   6737 		lasttx = nexttx;
   6738 		nexttx = WM_NEXTTX(txq, nexttx);
   6739 		/*
    6740 		 * Fill in the next descriptors.  The legacy and advanced
    6741 		 * formats are the same from here on.
   6742 		 */
   6743 		for (seg = 1; seg < dmamap->dm_nsegs;
   6744 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6745 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6746 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6747 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6748 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6749 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6750 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6751 			lasttx = nexttx;
   6752 
   6753 			DPRINTF(WM_DEBUG_TX,
   6754 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6755 			     "len %#04zx\n",
   6756 			    device_xname(sc->sc_dev), nexttx,
   6757 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6758 			    dmamap->dm_segs[seg].ds_len));
   6759 		}
   6760 
   6761 		KASSERT(lasttx != -1);
   6762 
   6763 		/*
   6764 		 * Set up the command byte on the last descriptor of
   6765 		 * the packet.  If we're in the interrupt delay window,
   6766 		 * delay the interrupt.
   6767 		 */
   6768 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6769 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6770 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6771 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6772 
   6773 		txs->txs_lastdesc = lasttx;
   6774 
   6775 		DPRINTF(WM_DEBUG_TX,
   6776 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6777 		    device_xname(sc->sc_dev),
   6778 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6779 
   6780 		/* Sync the descriptors we're using. */
   6781 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6782 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   6783 
   6784 		/* Give the packet to the chip. */
   6785 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6786 		sent = true;
   6787 
   6788 		DPRINTF(WM_DEBUG_TX,
   6789 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6790 
   6791 		DPRINTF(WM_DEBUG_TX,
   6792 		    ("%s: TX: finished transmitting packet, job %d\n",
   6793 		    device_xname(sc->sc_dev), txq->txq_snext));
   6794 
   6795 		/* Advance the tx pointer. */
   6796 		txq->txq_free -= txs->txs_ndesc;
   6797 		txq->txq_next = nexttx;
   6798 
   6799 		txq->txq_sfree--;
   6800 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6801 
   6802 		/* Pass the packet to any BPF listeners. */
   6803 		bpf_mtap(ifp, m0);
   6804 	}
   6805 
   6806 	if (m0 != NULL) {
   6807 		ifp->if_flags |= IFF_OACTIVE;
   6808 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6809 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   6810 		m_freem(m0);
   6811 	}
   6812 
   6813 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6814 		/* No more slots; notify upper layer. */
   6815 		ifp->if_flags |= IFF_OACTIVE;
   6816 	}
   6817 
   6818 	if (sent) {
   6819 		/* Set a watchdog timer in case the chip flakes out. */
   6820 		ifp->if_timer = 5;
   6821 	}
   6822 }
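
/*
 * Illustrative sketch (not part of the driver): the WM_NEXTTX()-style
 * index arithmetic used above is plain modular increment around the
 * descriptor ring; the "example_" name is hypothetical.
 */
#if 0
static inline int
example_next_index(int idx, int ringsize)
{
	return (idx + 1) % ringsize;
}
#endif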
   6823 
   6824 /* Interrupt */
   6825 
   6826 /*
   6827  * wm_txeof:
   6828  *
   6829  *	Helper; handle transmit interrupts.
   6830  */
   6831 static int
   6832 wm_txeof(struct wm_softc *sc)
   6833 {
   6834 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6835 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6836 	struct wm_txsoft *txs;
   6837 	bool processed = false;
   6838 	int count = 0;
   6839 	int i;
   6840 	uint8_t status;
   6841 
   6842 	if (sc->sc_stopping)
   6843 		return 0;
   6844 
   6845 	ifp->if_flags &= ~IFF_OACTIVE;
   6846 
   6847 	/*
   6848 	 * Go through the Tx list and free mbufs for those
   6849 	 * frames which have been transmitted.
   6850 	 */
   6851 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   6852 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   6853 		txs = &txq->txq_soft[i];
   6854 
   6855 		DPRINTF(WM_DEBUG_TX,
   6856 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   6857 
   6858 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   6859 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6860 
   6861 		status =
   6862 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   6863 		if ((status & WTX_ST_DD) == 0) {
   6864 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   6865 			    BUS_DMASYNC_PREREAD);
   6866 			break;
   6867 		}
   6868 
   6869 		processed = true;
   6870 		count++;
   6871 		DPRINTF(WM_DEBUG_TX,
   6872 		    ("%s: TX: job %d done: descs %d..%d\n",
   6873 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   6874 		    txs->txs_lastdesc));
   6875 
   6876 		/*
   6877 		 * XXX We should probably be using the statistics
   6878 		 * XXX registers, but I don't know if they exist
   6879 		 * XXX on chips before the i82544.
   6880 		 */
   6881 
   6882 #ifdef WM_EVENT_COUNTERS
   6883 		if (status & WTX_ST_TU)
   6884 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   6885 #endif /* WM_EVENT_COUNTERS */
   6886 
   6887 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   6888 			ifp->if_oerrors++;
   6889 			if (status & WTX_ST_LC)
   6890 				log(LOG_WARNING, "%s: late collision\n",
   6891 				    device_xname(sc->sc_dev));
   6892 			else if (status & WTX_ST_EC) {
   6893 				ifp->if_collisions += 16;
   6894 				log(LOG_WARNING, "%s: excessive collisions\n",
   6895 				    device_xname(sc->sc_dev));
   6896 			}
   6897 		} else
   6898 			ifp->if_opackets++;
   6899 
   6900 		txq->txq_free += txs->txs_ndesc;
   6901 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   6902 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   6903 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6904 		m_freem(txs->txs_mbuf);
   6905 		txs->txs_mbuf = NULL;
   6906 	}
   6907 
   6908 	/* Update the dirty transmit buffer pointer. */
   6909 	txq->txq_sdirty = i;
   6910 	DPRINTF(WM_DEBUG_TX,
   6911 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   6912 
   6913 	if (count != 0)
   6914 		rnd_add_uint32(&sc->rnd_source, count);
   6915 
   6916 	/*
   6917 	 * If there are no more pending transmissions, cancel the watchdog
   6918 	 * timer.
   6919 	 */
   6920 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   6921 		ifp->if_timer = 0;
   6922 
   6923 	return processed;
   6924 }
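
/*
 * Illustrative sketch (not part of the driver): the reclaim loop in
 * wm_txeof() above follows the usual completion-scan shape: start at
 * the oldest outstanding job, stop at the first one whose last
 * descriptor lacks the DD (descriptor done) bit, and free everything
 * before it.  All "example_" names are hypothetical.
 */
#if 0
struct example_job {
	bool	done;		/* stands in for the WTX_ST_DD test */
};

static int
example_reclaim(struct example_job *jobs, int njobs, int dirty, int *inuse)
{

	while (*inuse > 0 && jobs[dirty].done) {
		/* unload the DMA map and free the mbuf here */
		dirty = (dirty + 1) % njobs;
		(*inuse)--;
	}
	return dirty;		/* the new dirty pointer */
}
#endif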
   6925 
   6926 /*
   6927  * wm_rxeof:
   6928  *
   6929  *	Helper; handle receive interrupts.
   6930  */
   6931 static void
   6932 wm_rxeof(struct wm_rxqueue *rxq)
   6933 {
   6934 	struct wm_softc *sc = rxq->rxq_sc;
   6935 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6936 	struct wm_rxsoft *rxs;
   6937 	struct mbuf *m;
   6938 	int i, len;
   6939 	int count = 0;
   6940 	uint8_t status, errors;
   6941 	uint16_t vlantag;
   6942 
   6943 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   6944 		rxs = &rxq->rxq_soft[i];
   6945 
   6946 		DPRINTF(WM_DEBUG_RX,
   6947 		    ("%s: RX: checking descriptor %d\n",
   6948 		    device_xname(sc->sc_dev), i));
   6949 
   6950 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6951 
   6952 		status = rxq->rxq_descs[i].wrx_status;
   6953 		errors = rxq->rxq_descs[i].wrx_errors;
   6954 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   6955 		vlantag = rxq->rxq_descs[i].wrx_special;
   6956 
   6957 		if ((status & WRX_ST_DD) == 0) {
   6958 			/* We have processed all of the receive descriptors. */
   6959 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   6960 			break;
   6961 		}
   6962 
   6963 		count++;
   6964 		if (__predict_false(rxq->rxq_discard)) {
   6965 			DPRINTF(WM_DEBUG_RX,
   6966 			    ("%s: RX: discarding contents of descriptor %d\n",
   6967 			    device_xname(sc->sc_dev), i));
   6968 			wm_init_rxdesc(rxq, i);
   6969 			if (status & WRX_ST_EOP) {
   6970 				/* Reset our state. */
   6971 				DPRINTF(WM_DEBUG_RX,
   6972 				    ("%s: RX: resetting rxdiscard -> 0\n",
   6973 				    device_xname(sc->sc_dev)));
   6974 				rxq->rxq_discard = 0;
   6975 			}
   6976 			continue;
   6977 		}
   6978 
   6979 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6980 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   6981 
   6982 		m = rxs->rxs_mbuf;
   6983 
   6984 		/*
   6985 		 * Add a new receive buffer to the ring, unless of
   6986 		 * course the length is zero. Treat the latter as a
   6987 		 * failed mapping.
   6988 		 */
   6989 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   6990 			/*
   6991 			 * Failed, throw away what we've done so
   6992 			 * far, and discard the rest of the packet.
   6993 			 */
   6994 			ifp->if_ierrors++;
   6995 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6996 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   6997 			wm_init_rxdesc(rxq, i);
   6998 			if ((status & WRX_ST_EOP) == 0)
   6999 				rxq->rxq_discard = 1;
   7000 			if (rxq->rxq_head != NULL)
   7001 				m_freem(rxq->rxq_head);
   7002 			WM_RXCHAIN_RESET(rxq);
   7003 			DPRINTF(WM_DEBUG_RX,
   7004 			    ("%s: RX: Rx buffer allocation failed, "
   7005 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7006 			    rxq->rxq_discard ? " (discard)" : ""));
   7007 			continue;
   7008 		}
   7009 
   7010 		m->m_len = len;
   7011 		rxq->rxq_len += len;
   7012 		DPRINTF(WM_DEBUG_RX,
   7013 		    ("%s: RX: buffer at %p len %d\n",
   7014 		    device_xname(sc->sc_dev), m->m_data, len));
   7015 
   7016 		/* If this is not the end of the packet, keep looking. */
   7017 		if ((status & WRX_ST_EOP) == 0) {
   7018 			WM_RXCHAIN_LINK(rxq, m);
   7019 			DPRINTF(WM_DEBUG_RX,
   7020 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7021 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7022 			continue;
   7023 		}
   7024 
   7025 		/*
    7026 		 * Okay, we have the entire packet now.  The chip is
    7027 		 * configured to include the FCS except on I350, I354 and
    7028 		 * I21[01] (not all chips can be configured to strip it),
    7029 		 * so we need to trim it.  We may also need to adjust the
    7030 		 * length of the previous mbuf in the chain if the current
    7031 		 * mbuf is too short.
    7032 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7033 		 * register is always set on I350, so we don't trim there.
    7034 		 */
   7035 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7036 		    && (sc->sc_type != WM_T_I210)
   7037 		    && (sc->sc_type != WM_T_I211)) {
   7038 			if (m->m_len < ETHER_CRC_LEN) {
   7039 				rxq->rxq_tail->m_len
   7040 				    -= (ETHER_CRC_LEN - m->m_len);
   7041 				m->m_len = 0;
   7042 			} else
   7043 				m->m_len -= ETHER_CRC_LEN;
   7044 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7045 		} else
   7046 			len = rxq->rxq_len;
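
		/*
		 * Illustrative note (not in the original): if, say, only
		 * two bytes of the frame landed in the final buffer, the
		 * other two FCS bytes sit in the previous one, so the
		 * code above does rxq_tail->m_len -= (4 - 2) and sets
		 * m->m_len = 0.
		 */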
   7047 
   7048 		WM_RXCHAIN_LINK(rxq, m);
   7049 
   7050 		*rxq->rxq_tailp = NULL;
   7051 		m = rxq->rxq_head;
   7052 
   7053 		WM_RXCHAIN_RESET(rxq);
   7054 
   7055 		DPRINTF(WM_DEBUG_RX,
   7056 		    ("%s: RX: have entire packet, len -> %d\n",
   7057 		    device_xname(sc->sc_dev), len));
   7058 
   7059 		/* If an error occurred, update stats and drop the packet. */
   7060 		if (errors &
   7061 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7062 			if (errors & WRX_ER_SE)
   7063 				log(LOG_WARNING, "%s: symbol error\n",
   7064 				    device_xname(sc->sc_dev));
   7065 			else if (errors & WRX_ER_SEQ)
   7066 				log(LOG_WARNING, "%s: receive sequence error\n",
   7067 				    device_xname(sc->sc_dev));
   7068 			else if (errors & WRX_ER_CE)
   7069 				log(LOG_WARNING, "%s: CRC error\n",
   7070 				    device_xname(sc->sc_dev));
   7071 			m_freem(m);
   7072 			continue;
   7073 		}
   7074 
   7075 		/* No errors.  Receive the packet. */
   7076 		m->m_pkthdr.rcvif = ifp;
   7077 		m->m_pkthdr.len = len;
   7078 
   7079 		/*
   7080 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7081 		 * for us.  Associate the tag with the packet.
   7082 		 */
    7083 		/* XXX should check for i350 and i354 */
   7084 		if ((status & WRX_ST_VP) != 0) {
   7085 			VLAN_INPUT_TAG(ifp, m,
   7086 			    le16toh(vlantag),
   7087 			    continue);
   7088 		}
   7089 
   7090 		/* Set up checksum info for this packet. */
   7091 		if ((status & WRX_ST_IXSM) == 0) {
   7092 			if (status & WRX_ST_IPCS) {
   7093 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   7094 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7095 				if (errors & WRX_ER_IPE)
   7096 					m->m_pkthdr.csum_flags |=
   7097 					    M_CSUM_IPv4_BAD;
   7098 			}
   7099 			if (status & WRX_ST_TCPCS) {
   7100 				/*
   7101 				 * Note: we don't know if this was TCP or UDP,
   7102 				 * so we just set both bits, and expect the
   7103 				 * upper layers to deal.
   7104 				 */
   7105 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   7106 				m->m_pkthdr.csum_flags |=
   7107 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7108 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7109 				if (errors & WRX_ER_TCPE)
   7110 					m->m_pkthdr.csum_flags |=
   7111 					    M_CSUM_TCP_UDP_BAD;
   7112 			}
   7113 		}
   7114 
   7115 		ifp->if_ipackets++;
   7116 
   7117 		WM_RX_UNLOCK(rxq);
   7118 
   7119 		/* Pass this up to any BPF listeners. */
   7120 		bpf_mtap(ifp, m);
   7121 
   7122 		/* Pass it on. */
   7123 		(*ifp->if_input)(ifp, m);
   7124 
   7125 		WM_RX_LOCK(rxq);
   7126 
   7127 		if (sc->sc_stopping)
   7128 			break;
   7129 	}
   7130 
   7131 	/* Update the receive pointer. */
   7132 	rxq->rxq_ptr = i;
   7133 	if (count != 0)
   7134 		rnd_add_uint32(&sc->rnd_source, count);
   7135 
   7136 	DPRINTF(WM_DEBUG_RX,
   7137 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7138 }
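
/*
 * Illustrative sketch (not part of the driver): a frame larger than
 * one receive buffer spans several descriptors, and only the last of
 * them carries EOP.  The chain assembly in wm_rxeof() above reduces
 * to the skeleton below; all "example_" names are hypothetical.
 */
#if 0
struct example_rxdesc {
	int	len;
	bool	dd;	/* stands in for WRX_ST_DD */
	bool	eop;	/* stands in for WRX_ST_EOP */
};

/* Return the packet length, or -1 if the packet is still incomplete. */
static int
example_rx_assemble(const struct example_rxdesc *d, int n)
{
	int i, total = 0;

	for (i = 0; i < n && d[i].dd; i++) {
		total += d[i].len;	/* link this buffer into the chain */
		if (d[i].eop)
			return total;	/* full packet collected */
	}
	return -1;
}
#endif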
   7139 
   7140 /*
   7141  * wm_linkintr_gmii:
   7142  *
   7143  *	Helper; handle link interrupts for GMII.
   7144  */
   7145 static void
   7146 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7147 {
   7148 
   7149 	KASSERT(WM_CORE_LOCKED(sc));
   7150 
   7151 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7152 		__func__));
   7153 
   7154 	if (icr & ICR_LSC) {
   7155 		DPRINTF(WM_DEBUG_LINK,
   7156 		    ("%s: LINK: LSC -> mii_pollstat\n",
   7157 			device_xname(sc->sc_dev)));
   7158 		mii_pollstat(&sc->sc_mii);
   7159 		if (sc->sc_type == WM_T_82543) {
   7160 			int miistatus, active;
   7161 
   7162 			/*
   7163 			 * With 82543, we need to force speed and
   7164 			 * duplex on the MAC equal to what the PHY
   7165 			 * speed and duplex configuration is.
   7166 			 */
   7167 			miistatus = sc->sc_mii.mii_media_status;
   7168 
   7169 			if (miistatus & IFM_ACTIVE) {
   7170 				active = sc->sc_mii.mii_media_active;
   7171 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7172 				switch (IFM_SUBTYPE(active)) {
   7173 				case IFM_10_T:
   7174 					sc->sc_ctrl |= CTRL_SPEED_10;
   7175 					break;
   7176 				case IFM_100_TX:
   7177 					sc->sc_ctrl |= CTRL_SPEED_100;
   7178 					break;
   7179 				case IFM_1000_T:
   7180 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7181 					break;
   7182 				default:
   7183 					/*
    7184 					 * Fiber?
    7185 					 * Should not enter here.
   7186 					 */
   7187 					printf("unknown media (%x)\n",
   7188 					    active);
   7189 					break;
   7190 				}
   7191 				if (active & IFM_FDX)
   7192 					sc->sc_ctrl |= CTRL_FD;
   7193 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7194 			}
   7195 		} else if ((sc->sc_type == WM_T_ICH8)
   7196 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7197 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7198 		} else if (sc->sc_type == WM_T_PCH) {
   7199 			wm_k1_gig_workaround_hv(sc,
   7200 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7201 		}
   7202 
   7203 		if ((sc->sc_phytype == WMPHY_82578)
   7204 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7205 			== IFM_1000_T)) {
   7206 
   7207 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7208 				delay(200*1000); /* XXX too big */
   7209 
   7210 				/* Link stall fix for link up */
   7211 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7212 				    HV_MUX_DATA_CTRL,
   7213 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7214 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7215 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7216 				    HV_MUX_DATA_CTRL,
   7217 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7218 			}
   7219 		}
   7220 	} else if (icr & ICR_RXSEQ) {
   7221 		DPRINTF(WM_DEBUG_LINK,
    7222 		    ("%s: LINK: Receive sequence error\n",
   7223 			device_xname(sc->sc_dev)));
   7224 	}
   7225 }
   7226 
   7227 /*
   7228  * wm_linkintr_tbi:
   7229  *
   7230  *	Helper; handle link interrupts for TBI mode.
   7231  */
   7232 static void
   7233 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7234 {
   7235 	uint32_t status;
   7236 
   7237 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7238 		__func__));
   7239 
   7240 	status = CSR_READ(sc, WMREG_STATUS);
   7241 	if (icr & ICR_LSC) {
   7242 		if (status & STATUS_LU) {
   7243 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7244 			    device_xname(sc->sc_dev),
   7245 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7246 			/*
   7247 			 * NOTE: CTRL will update TFCE and RFCE automatically,
    7248 			 * so we re-read it to refresh sc->sc_ctrl.
   7249 			 */
   7250 
   7251 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7252 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7253 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7254 			if (status & STATUS_FD)
   7255 				sc->sc_tctl |=
   7256 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7257 			else
   7258 				sc->sc_tctl |=
   7259 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7260 			if (sc->sc_ctrl & CTRL_TFCE)
   7261 				sc->sc_fcrtl |= FCRTL_XONE;
   7262 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7263 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7264 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7265 				      sc->sc_fcrtl);
   7266 			sc->sc_tbi_linkup = 1;
   7267 		} else {
   7268 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7269 			    device_xname(sc->sc_dev)));
   7270 			sc->sc_tbi_linkup = 0;
   7271 		}
   7272 		/* Update LED */
   7273 		wm_tbi_serdes_set_linkled(sc);
   7274 	} else if (icr & ICR_RXSEQ) {
   7275 		DPRINTF(WM_DEBUG_LINK,
   7276 		    ("%s: LINK: Receive sequence error\n",
   7277 		    device_xname(sc->sc_dev)));
   7278 	}
   7279 }
   7280 
   7281 /*
   7282  * wm_linkintr_serdes:
   7283  *
    7284  *	Helper; handle link interrupts for SERDES mode.
   7285  */
   7286 static void
   7287 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7288 {
   7289 	struct mii_data *mii = &sc->sc_mii;
   7290 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7291 	uint32_t pcs_adv, pcs_lpab, reg;
   7292 
   7293 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7294 		__func__));
   7295 
   7296 	if (icr & ICR_LSC) {
   7297 		/* Check PCS */
   7298 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7299 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7300 			mii->mii_media_status |= IFM_ACTIVE;
   7301 			sc->sc_tbi_linkup = 1;
   7302 		} else {
   7303 			mii->mii_media_status |= IFM_NONE;
   7304 			sc->sc_tbi_linkup = 0;
   7305 			wm_tbi_serdes_set_linkled(sc);
   7306 			return;
   7307 		}
   7308 		mii->mii_media_active |= IFM_1000_SX;
   7309 		if ((reg & PCS_LSTS_FDX) != 0)
   7310 			mii->mii_media_active |= IFM_FDX;
   7311 		else
   7312 			mii->mii_media_active |= IFM_HDX;
   7313 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7314 			/* Check flow */
   7315 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7316 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7317 				DPRINTF(WM_DEBUG_LINK,
   7318 				    ("XXX LINKOK but not ACOMP\n"));
   7319 				return;
   7320 			}
   7321 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7322 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7323 			DPRINTF(WM_DEBUG_LINK,
   7324 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7325 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7326 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7327 				mii->mii_media_active |= IFM_FLOW
   7328 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7329 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7330 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7331 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7332 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7333 				mii->mii_media_active |= IFM_FLOW
   7334 				    | IFM_ETH_TXPAUSE;
   7335 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7336 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7337 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7338 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7339 				mii->mii_media_active |= IFM_FLOW
   7340 				    | IFM_ETH_RXPAUSE;
   7341 		}
   7342 		/* Update LED */
   7343 		wm_tbi_serdes_set_linkled(sc);
   7344 	} else {
   7345 		DPRINTF(WM_DEBUG_LINK,
   7346 		    ("%s: LINK: Receive sequence error\n",
   7347 		    device_xname(sc->sc_dev)));
   7348 	}
   7349 }
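
/*
 * Illustrative sketch (not part of the driver): the pause negotiation
 * above is the standard IEEE 802.3 Annex 28B resolution over the
 * local (pcs_adv) and link partner (pcs_lpab) SYM/ASYM pause bits;
 * the "example_" name is hypothetical.
 */
#if 0
static void
example_resolve_flow(bool adv_sym, bool adv_asym, bool lp_sym,
    bool lp_asym, bool *tx_pause, bool *rx_pause)
{

	*tx_pause = *rx_pause = false;
	if (adv_sym && lp_sym)
		*tx_pause = *rx_pause = true;	/* symmetric pause */
	else if (!adv_sym && adv_asym && lp_sym && lp_asym)
		*tx_pause = true;	/* we send PAUSE, don't honor it */
	else if (adv_sym && adv_asym && !lp_sym && lp_asym)
		*rx_pause = true;	/* we honor PAUSE, don't send it */
}
#endif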
   7350 
   7351 /*
   7352  * wm_linkintr:
   7353  *
   7354  *	Helper; handle link interrupts.
   7355  */
   7356 static void
   7357 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7358 {
   7359 
   7360 	KASSERT(WM_CORE_LOCKED(sc));
   7361 
   7362 	if (sc->sc_flags & WM_F_HAS_MII)
   7363 		wm_linkintr_gmii(sc, icr);
   7364 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7365 	    && (sc->sc_type >= WM_T_82575))
   7366 		wm_linkintr_serdes(sc, icr);
   7367 	else
   7368 		wm_linkintr_tbi(sc, icr);
   7369 }
   7370 
   7371 /*
   7372  * wm_intr_legacy:
   7373  *
   7374  *	Interrupt service routine for INTx and MSI.
   7375  */
   7376 static int
   7377 wm_intr_legacy(void *arg)
   7378 {
   7379 	struct wm_softc *sc = arg;
   7380 	struct wm_txqueue *txq = &sc->sc_txq[0];
   7381 	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
   7382 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7383 	uint32_t icr, rndval = 0;
   7384 	int handled = 0;
   7385 
   7386 	DPRINTF(WM_DEBUG_TX,
   7387 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7388 	while (1 /* CONSTCOND */) {
   7389 		icr = CSR_READ(sc, WMREG_ICR);
   7390 		if ((icr & sc->sc_icr) == 0)
   7391 			break;
   7392 		if (rndval == 0)
   7393 			rndval = icr;
   7394 
   7395 		WM_RX_LOCK(rxq);
   7396 
   7397 		if (sc->sc_stopping) {
   7398 			WM_RX_UNLOCK(rxq);
   7399 			break;
   7400 		}
   7401 
   7402 		handled = 1;
   7403 
   7404 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7405 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   7406 			DPRINTF(WM_DEBUG_RX,
   7407 			    ("%s: RX: got Rx intr 0x%08x\n",
   7408 			    device_xname(sc->sc_dev),
   7409 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   7410 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7411 		}
   7412 #endif
   7413 		wm_rxeof(rxq);
   7414 
   7415 		WM_RX_UNLOCK(rxq);
   7416 		WM_TX_LOCK(txq);
   7417 
   7418 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7419 		if (icr & ICR_TXDW) {
   7420 			DPRINTF(WM_DEBUG_TX,
   7421 			    ("%s: TX: got TXDW interrupt\n",
   7422 			    device_xname(sc->sc_dev)));
   7423 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7424 		}
   7425 #endif
   7426 		wm_txeof(sc);
   7427 
   7428 		WM_TX_UNLOCK(txq);
   7429 		WM_CORE_LOCK(sc);
   7430 
   7431 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
   7432 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7433 			wm_linkintr(sc, icr);
   7434 		}
   7435 
   7436 		WM_CORE_UNLOCK(sc);
   7437 
   7438 		if (icr & ICR_RXO) {
   7439 #if defined(WM_DEBUG)
   7440 			log(LOG_WARNING, "%s: Receive overrun\n",
   7441 			    device_xname(sc->sc_dev));
   7442 #endif /* defined(WM_DEBUG) */
   7443 		}
   7444 	}
   7445 
   7446 	rnd_add_uint32(&sc->rnd_source, rndval);
   7447 
   7448 	if (handled) {
   7449 		/* Try to get more packets going. */
   7450 		ifp->if_start(ifp);
   7451 	}
   7452 
   7453 	return handled;
   7454 }
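
/*
 * Illustrative sketch (not part of the driver): ICR is read-to-clear,
 * so the service loop in wm_intr_legacy() above keeps going until a
 * read shows none of the causes this driver enabled (hypothetical
 * names):
 */
#if 0
	for (;;) {
		icr = read_icr();		/* reading clears the bits */
		if ((icr & enabled_mask) == 0)
			break;			/* nothing left to service */
		service(icr);
	}
#endif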
   7455 
   7456 #ifdef WM_MSI_MSIX
   7457 /*
   7458  * wm_txintr_msix:
   7459  *
   7460  *	Interrupt service routine for TX complete interrupt for MSI-X.
   7461  */
   7462 static int
   7463 wm_txintr_msix(void *arg)
   7464 {
   7465 	struct wm_txqueue *txq = arg;
   7466 	struct wm_softc *sc = txq->txq_sc;
   7467 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7468 	int handled = 0;
   7469 
   7470 	DPRINTF(WM_DEBUG_TX,
   7471 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7472 
   7473 	if (sc->sc_type == WM_T_82574)
   7474 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id)); /* 82574 only */
   7475 	else if (sc->sc_type == WM_T_82575)
   7476 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
   7477 	else
   7478 		CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
   7479 
   7480 	WM_TX_LOCK(txq);
   7481 
   7482 	if (sc->sc_stopping)
   7483 		goto out;
   7484 
   7485 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7486 	handled = wm_txeof(sc);
   7487 
   7488 out:
   7489 	WM_TX_UNLOCK(txq);
   7490 
   7491 	if (sc->sc_type == WM_T_82574)
   7492 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id)); /* 82574 only */
   7493 	else if (sc->sc_type == WM_T_82575)
   7494 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
   7495 	else
   7496 		CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
   7497 
   7498 	if (handled) {
   7499 		/* Try to get more packets going. */
   7500 		ifp->if_start(ifp);
   7501 	}
   7502 
   7503 	return handled;
   7504 }
   7505 
   7506 /*
   7507  * wm_rxintr_msix:
   7508  *
   7509  *	Interrupt service routine for RX interrupt for MSI-X.
   7510  */
   7511 static int
   7512 wm_rxintr_msix(void *arg)
   7513 {
   7514 	struct wm_rxqueue *rxq = arg;
   7515 	struct wm_softc *sc = rxq->rxq_sc;
   7516 
   7517 	DPRINTF(WM_DEBUG_RX,
   7518 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7519 
   7520 	if (sc->sc_type == WM_T_82574)
   7521 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id)); /* 82574 only */
   7522 	else if (sc->sc_type == WM_T_82575)
   7523 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
   7524 	else
   7525 		CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
   7526 
   7527 	WM_RX_LOCK(rxq);
   7528 
   7529 	if (sc->sc_stopping)
   7530 		goto out;
   7531 
   7532 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7533 	wm_rxeof(rxq);
   7534 
   7535 out:
   7536 	WM_RX_UNLOCK(rxq);
   7537 
   7538 	if (sc->sc_type == WM_T_82574)
   7539 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
   7540 	else if (sc->sc_type == WM_T_82575)
   7541 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
   7542 	else
   7543 		CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
   7544 
   7545 	return 1;
   7546 }
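
/*
 * Illustrative sketch (not part of the driver): both MSI-X queue
 * handlers above share the same disable/service/re-enable shape,
 * which keeps the queue's own vector masked while its ring is being
 * processed (hypothetical names):
 */
#if 0
	mask_my_vector();	/* the EIMC/IMC write */
	lock_queue();
	if (!stopping)
		service_ring();
	unlock_queue();
	unmask_my_vector();	/* the EIMS/IMS write */
#endif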
   7547 
   7548 /*
   7549  * wm_linkintr_msix:
   7550  *
   7551  *	Interrupt service routine for link status change for MSI-X.
   7552  */
   7553 static int
   7554 wm_linkintr_msix(void *arg)
   7555 {
   7556 	struct wm_softc *sc = arg;
   7557 	uint32_t reg;
   7558 
   7559 	DPRINTF(WM_DEBUG_LINK,
   7560 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7561 
   7562 	reg = CSR_READ(sc, WMREG_ICR);
   7563 	WM_CORE_LOCK(sc);
   7564 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7565 		goto out;
   7566 
   7567 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7568 	wm_linkintr(sc, ICR_LSC);
   7569 
   7570 out:
   7571 	WM_CORE_UNLOCK(sc);
   7572 
   7573 	if (sc->sc_type == WM_T_82574)
   7574 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
   7575 	else if (sc->sc_type == WM_T_82575)
   7576 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7577 	else
   7578 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7579 
   7580 	return 1;
   7581 }
   7582 #endif /* WM_MSI_MSIX */
   7583 
   7584 /*
   7585  * Media related.
   7586  * GMII, SGMII, TBI (and SERDES)
   7587  */
   7588 
   7589 /* Common */
   7590 
   7591 /*
   7592  * wm_tbi_serdes_set_linkled:
   7593  *
   7594  *	Update the link LED on TBI and SERDES devices.
   7595  */
   7596 static void
   7597 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7598 {
   7599 
   7600 	if (sc->sc_tbi_linkup)
   7601 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7602 	else
   7603 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7604 
   7605 	/* 82540 or newer devices are active low */
   7606 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7607 
   7608 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7609 }
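
/*
 * Illustrative note (not in the original): on >= 82540 parts the pin
 * is active low, so the XOR above inverts the logical link state.
 * E.g. link up on an active-low part gives 1 ^ 1 = 0, and driving
 * the pin low lights the LED; on older parts the XOR mask is 0 and
 * the value is left alone.
 */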
   7610 
   7611 /* GMII related */
   7612 
   7613 /*
   7614  * wm_gmii_reset:
   7615  *
   7616  *	Reset the PHY.
   7617  */
   7618 static void
   7619 wm_gmii_reset(struct wm_softc *sc)
   7620 {
   7621 	uint32_t reg;
   7622 	int rv;
   7623 
   7624 	/* get phy semaphore */
   7625 	switch (sc->sc_type) {
   7626 	case WM_T_82571:
   7627 	case WM_T_82572:
   7628 	case WM_T_82573:
   7629 	case WM_T_82574:
   7630 	case WM_T_82583:
   7631 		 /* XXX should get sw semaphore, too */
   7632 		rv = wm_get_swsm_semaphore(sc);
   7633 		break;
   7634 	case WM_T_82575:
   7635 	case WM_T_82576:
   7636 	case WM_T_82580:
   7637 	case WM_T_I350:
   7638 	case WM_T_I354:
   7639 	case WM_T_I210:
   7640 	case WM_T_I211:
   7641 	case WM_T_80003:
   7642 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7643 		break;
   7644 	case WM_T_ICH8:
   7645 	case WM_T_ICH9:
   7646 	case WM_T_ICH10:
   7647 	case WM_T_PCH:
   7648 	case WM_T_PCH2:
   7649 	case WM_T_PCH_LPT:
   7650 		rv = wm_get_swfwhw_semaphore(sc);
   7651 		break;
   7652 	default:
    7653 		/* Nothing to do */
   7654 		rv = 0;
   7655 		break;
   7656 	}
   7657 	if (rv != 0) {
   7658 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7659 		    __func__);
   7660 		return;
   7661 	}
   7662 
   7663 	switch (sc->sc_type) {
   7664 	case WM_T_82542_2_0:
   7665 	case WM_T_82542_2_1:
   7666 		/* null */
   7667 		break;
   7668 	case WM_T_82543:
   7669 		/*
   7670 		 * With 82543, we need to force speed and duplex on the MAC
   7671 		 * equal to what the PHY speed and duplex configuration is.
   7672 		 * In addition, we need to perform a hardware reset on the PHY
   7673 		 * to take it out of reset.
   7674 		 */
   7675 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7676 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7677 
   7678 		/* The PHY reset pin is active-low. */
   7679 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7680 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7681 		    CTRL_EXT_SWDPIN(4));
   7682 		reg |= CTRL_EXT_SWDPIO(4);
   7683 
   7684 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7685 		CSR_WRITE_FLUSH(sc);
   7686 		delay(10*1000);
   7687 
   7688 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7689 		CSR_WRITE_FLUSH(sc);
   7690 		delay(150);
   7691 #if 0
   7692 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7693 #endif
   7694 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7695 		break;
   7696 	case WM_T_82544:	/* reset 10000us */
   7697 	case WM_T_82540:
   7698 	case WM_T_82545:
   7699 	case WM_T_82545_3:
   7700 	case WM_T_82546:
   7701 	case WM_T_82546_3:
   7702 	case WM_T_82541:
   7703 	case WM_T_82541_2:
   7704 	case WM_T_82547:
   7705 	case WM_T_82547_2:
   7706 	case WM_T_82571:	/* reset 100us */
   7707 	case WM_T_82572:
   7708 	case WM_T_82573:
   7709 	case WM_T_82574:
   7710 	case WM_T_82575:
   7711 	case WM_T_82576:
   7712 	case WM_T_82580:
   7713 	case WM_T_I350:
   7714 	case WM_T_I354:
   7715 	case WM_T_I210:
   7716 	case WM_T_I211:
   7717 	case WM_T_82583:
   7718 	case WM_T_80003:
   7719 		/* generic reset */
   7720 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7721 		CSR_WRITE_FLUSH(sc);
   7722 		delay(20000);
   7723 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7724 		CSR_WRITE_FLUSH(sc);
   7725 		delay(20000);
   7726 
   7727 		if ((sc->sc_type == WM_T_82541)
   7728 		    || (sc->sc_type == WM_T_82541_2)
   7729 		    || (sc->sc_type == WM_T_82547)
   7730 		    || (sc->sc_type == WM_T_82547_2)) {
    7731 			/* workarounds for igp are done in igp_reset() */
   7732 			/* XXX add code to set LED after phy reset */
   7733 		}
   7734 		break;
   7735 	case WM_T_ICH8:
   7736 	case WM_T_ICH9:
   7737 	case WM_T_ICH10:
   7738 	case WM_T_PCH:
   7739 	case WM_T_PCH2:
   7740 	case WM_T_PCH_LPT:
   7741 		/* generic reset */
   7742 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7743 		CSR_WRITE_FLUSH(sc);
   7744 		delay(100);
   7745 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7746 		CSR_WRITE_FLUSH(sc);
   7747 		delay(150);
   7748 		break;
   7749 	default:
   7750 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7751 		    __func__);
   7752 		break;
   7753 	}
   7754 
   7755 	/* release PHY semaphore */
   7756 	switch (sc->sc_type) {
   7757 	case WM_T_82571:
   7758 	case WM_T_82572:
   7759 	case WM_T_82573:
   7760 	case WM_T_82574:
   7761 	case WM_T_82583:
   7762 		 /* XXX should put sw semaphore, too */
   7763 		wm_put_swsm_semaphore(sc);
   7764 		break;
   7765 	case WM_T_82575:
   7766 	case WM_T_82576:
   7767 	case WM_T_82580:
   7768 	case WM_T_I350:
   7769 	case WM_T_I354:
   7770 	case WM_T_I210:
   7771 	case WM_T_I211:
   7772 	case WM_T_80003:
   7773 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7774 		break;
   7775 	case WM_T_ICH8:
   7776 	case WM_T_ICH9:
   7777 	case WM_T_ICH10:
   7778 	case WM_T_PCH:
   7779 	case WM_T_PCH2:
   7780 	case WM_T_PCH_LPT:
   7781 		wm_put_swfwhw_semaphore(sc);
   7782 		break;
   7783 	default:
    7784 		/* Nothing to do */
   7785 		rv = 0;
   7786 		break;
   7787 	}
   7788 
   7789 	/* get_cfg_done */
   7790 	wm_get_cfg_done(sc);
   7791 
   7792 	/* extra setup */
   7793 	switch (sc->sc_type) {
   7794 	case WM_T_82542_2_0:
   7795 	case WM_T_82542_2_1:
   7796 	case WM_T_82543:
   7797 	case WM_T_82544:
   7798 	case WM_T_82540:
   7799 	case WM_T_82545:
   7800 	case WM_T_82545_3:
   7801 	case WM_T_82546:
   7802 	case WM_T_82546_3:
   7803 	case WM_T_82541_2:
   7804 	case WM_T_82547_2:
   7805 	case WM_T_82571:
   7806 	case WM_T_82572:
   7807 	case WM_T_82573:
   7808 	case WM_T_82574:
   7809 	case WM_T_82575:
   7810 	case WM_T_82576:
   7811 	case WM_T_82580:
   7812 	case WM_T_I350:
   7813 	case WM_T_I354:
   7814 	case WM_T_I210:
   7815 	case WM_T_I211:
   7816 	case WM_T_82583:
   7817 	case WM_T_80003:
   7818 		/* null */
   7819 		break;
   7820 	case WM_T_82541:
   7821 	case WM_T_82547:
    7822 		/* XXX Configure the LED after PHY reset */
   7823 		break;
   7824 	case WM_T_ICH8:
   7825 	case WM_T_ICH9:
   7826 	case WM_T_ICH10:
   7827 	case WM_T_PCH:
   7828 	case WM_T_PCH2:
   7829 	case WM_T_PCH_LPT:
    7830 		/* Allow time for h/w to get to a quiescent state after reset */
   7831 		delay(10*1000);
   7832 
   7833 		if (sc->sc_type == WM_T_PCH)
   7834 			wm_hv_phy_workaround_ich8lan(sc);
   7835 
   7836 		if (sc->sc_type == WM_T_PCH2)
   7837 			wm_lv_phy_workaround_ich8lan(sc);
   7838 
   7839 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7840 			/*
   7841 			 * dummy read to clear the phy wakeup bit after lcd
   7842 			 * reset
   7843 			 */
   7844 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7845 		}
   7846 
   7847 		/*
    7848 		 * XXX Configure the LCD with the extended configuration region
   7849 		 * in NVM
   7850 		 */
   7851 
   7852 		/* Configure the LCD with the OEM bits in NVM */
   7853 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   7854 		    || (sc->sc_type == WM_T_PCH_LPT)) {
   7855 			/*
   7856 			 * Disable LPLU.
   7857 			 * XXX It seems that 82567 has LPLU, too.
   7858 			 */
   7859 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
    7860 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   7861 			reg |= HV_OEM_BITS_ANEGNOW;
   7862 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   7863 		}
   7864 		break;
   7865 	default:
   7866 		panic("%s: unknown type\n", __func__);
   7867 		break;
   7868 	}
   7869 }
   7870 
   7871 /*
   7872  * wm_get_phy_id_82575:
   7873  *
   7874  * Return PHY ID. Return -1 if it failed.
   7875  */
   7876 static int
   7877 wm_get_phy_id_82575(struct wm_softc *sc)
   7878 {
   7879 	uint32_t reg;
   7880 	int phyid = -1;
   7881 
   7882 	/* XXX */
   7883 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   7884 		return -1;
   7885 
   7886 	if (wm_sgmii_uses_mdio(sc)) {
   7887 		switch (sc->sc_type) {
   7888 		case WM_T_82575:
   7889 		case WM_T_82576:
   7890 			reg = CSR_READ(sc, WMREG_MDIC);
   7891 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   7892 			break;
   7893 		case WM_T_82580:
   7894 		case WM_T_I350:
   7895 		case WM_T_I354:
   7896 		case WM_T_I210:
   7897 		case WM_T_I211:
   7898 			reg = CSR_READ(sc, WMREG_MDICNFG);
   7899 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   7900 			break;
   7901 		default:
   7902 			return -1;
   7903 		}
   7904 	}
   7905 
   7906 	return phyid;
   7907 }
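
/*
 * Illustrative note (not in the original): the PHY address extraction
 * above is plain mask-and-shift.  With a hypothetical field occupying
 * bits 25:21, for example:
 *
 *	phyid = (reg & 0x03e00000U) >> 21;
 */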
   7908 
   7909 
   7910 /*
   7911  * wm_gmii_mediainit:
   7912  *
   7913  *	Initialize media for use on 1000BASE-T devices.
   7914  */
   7915 static void
   7916 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   7917 {
   7918 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7919 	struct mii_data *mii = &sc->sc_mii;
   7920 	uint32_t reg;
   7921 
   7922 	/* We have GMII. */
   7923 	sc->sc_flags |= WM_F_HAS_MII;
   7924 
   7925 	if (sc->sc_type == WM_T_80003)
   7926 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   7927 	else
   7928 		sc->sc_tipg = TIPG_1000T_DFLT;
   7929 
   7930 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   7931 	if ((sc->sc_type == WM_T_82580)
   7932 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   7933 	    || (sc->sc_type == WM_T_I211)) {
   7934 		reg = CSR_READ(sc, WMREG_PHPM);
   7935 		reg &= ~PHPM_GO_LINK_D;
   7936 		CSR_WRITE(sc, WMREG_PHPM, reg);
   7937 	}
   7938 
   7939 	/*
   7940 	 * Let the chip set speed/duplex on its own based on
   7941 	 * signals from the PHY.
   7942 	 * XXXbouyer - I'm not sure this is right for the 80003,
   7943 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   7944 	 */
   7945 	sc->sc_ctrl |= CTRL_SLU;
   7946 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7947 
   7948 	/* Initialize our media structures and probe the GMII. */
   7949 	mii->mii_ifp = ifp;
   7950 
   7951 	/*
   7952 	 * Determine the PHY access method.
   7953 	 *
    7954 	 *  For SGMII, use the SGMII specific method.
    7955 	 *
    7956 	 *  For some devices, we can determine the PHY access method
    7957 	 * from sc_type.
    7958 	 *
    7959 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    7960 	 * access method by sc_type, so use the PCI product ID for some
    7961 	 * devices.
    7962 	 * For other ICH8 variants, try igp's method first; if the PHY
    7963 	 * can't be detected that way, fall back to bm's method.
   7964 	 */
   7965 	switch (prodid) {
   7966 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   7967 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   7968 		/* 82577 */
   7969 		sc->sc_phytype = WMPHY_82577;
   7970 		break;
   7971 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   7972 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   7973 		/* 82578 */
   7974 		sc->sc_phytype = WMPHY_82578;
   7975 		break;
   7976 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   7977 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   7978 		/* 82579 */
   7979 		sc->sc_phytype = WMPHY_82579;
   7980 		break;
   7981 	case PCI_PRODUCT_INTEL_82801I_BM:
   7982 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   7983 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   7984 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   7985 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   7986 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   7987 		/* 82567 */
   7988 		sc->sc_phytype = WMPHY_BM;
   7989 		mii->mii_readreg = wm_gmii_bm_readreg;
   7990 		mii->mii_writereg = wm_gmii_bm_writereg;
   7991 		break;
   7992 	default:
   7993 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   7994 		    && !wm_sgmii_uses_mdio(sc)){
   7995 			/* SGMII */
   7996 			mii->mii_readreg = wm_sgmii_readreg;
   7997 			mii->mii_writereg = wm_sgmii_writereg;
   7998 		} else if (sc->sc_type >= WM_T_80003) {
   7999 			/* 80003 */
   8000 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8001 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8002 		} else if (sc->sc_type >= WM_T_I210) {
   8003 			/* I210 and I211 */
   8004 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8005 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8006 		} else if (sc->sc_type >= WM_T_82580) {
   8007 			/* 82580, I350 and I354 */
   8008 			sc->sc_phytype = WMPHY_82580;
   8009 			mii->mii_readreg = wm_gmii_82580_readreg;
   8010 			mii->mii_writereg = wm_gmii_82580_writereg;
   8011 		} else if (sc->sc_type >= WM_T_82544) {
    8012 			/* 82544, 8254[01567], 8257[1234] and 82583 */
   8013 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8014 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8015 		} else {
   8016 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8017 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8018 		}
   8019 		break;
   8020 	}
   8021 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
   8022 		/* All PCH* use _hv_ */
   8023 		mii->mii_readreg = wm_gmii_hv_readreg;
   8024 		mii->mii_writereg = wm_gmii_hv_writereg;
   8025 	}
   8026 	mii->mii_statchg = wm_gmii_statchg;
   8027 
   8028 	wm_gmii_reset(sc);
   8029 
   8030 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8031 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8032 	    wm_gmii_mediastatus);
   8033 
   8034 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8035 	    || (sc->sc_type == WM_T_82580)
   8036 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8037 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8038 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8039 			/* Attach only one port */
   8040 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8041 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8042 		} else {
   8043 			int i, id;
   8044 			uint32_t ctrl_ext;
   8045 
   8046 			id = wm_get_phy_id_82575(sc);
   8047 			if (id != -1) {
   8048 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8049 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8050 			}
   8051 			if ((id == -1)
   8052 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8053 				/* Power on sgmii phy if it is disabled */
   8054 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8055 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8056 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8057 				CSR_WRITE_FLUSH(sc);
   8058 				delay(300*1000); /* XXX too long */
   8059 
    8060 				/* Try PHY addresses 1 through 7 */
   8061 				for (i = 1; i < 8; i++)
   8062 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8063 					    0xffffffff, i, MII_OFFSET_ANY,
   8064 					    MIIF_DOPAUSE);
   8065 
   8066 				/* restore previous sfp cage power state */
   8067 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8068 			}
   8069 		}
   8070 	} else {
   8071 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8072 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8073 	}
   8074 
   8075 	/*
   8076 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8077 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8078 	 */
   8079 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8080 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8081 		wm_set_mdio_slow_mode_hv(sc);
   8082 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8083 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8084 	}
   8085 
   8086 	/*
   8087 	 * (For ICH8 variants)
    8088 	 * If PHY detection failed, retry with bm's read/write functions.
   8089 	 */
   8090 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8091 		/* if failed, retry with *_bm_* */
   8092 		mii->mii_readreg = wm_gmii_bm_readreg;
   8093 		mii->mii_writereg = wm_gmii_bm_writereg;
   8094 
   8095 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8096 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8097 	}
   8098 
   8099 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8100 		/* No PHY was found */
   8101 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   8102 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   8103 		sc->sc_phytype = WMPHY_NONE;
   8104 	} else {
   8105 		/*
   8106 		 * PHY Found!
   8107 		 * Check PHY type.
   8108 		 */
   8109 		uint32_t model;
   8110 		struct mii_softc *child;
   8111 
   8112 		child = LIST_FIRST(&mii->mii_phys);
   8113 		if (device_is_a(child->mii_dev, "igphy")) {
   8114 			struct igphy_softc *isc = (struct igphy_softc *)child;
   8115 
   8116 			model = isc->sc_mii.mii_mpd_model;
   8117 			if (model == MII_MODEL_yyINTEL_I82566)
   8118 				sc->sc_phytype = WMPHY_IGP_3;
   8119 		}
   8120 
   8121 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8122 	}
   8123 }
   8124 
   8125 /*
   8126  * wm_gmii_mediachange:	[ifmedia interface function]
   8127  *
   8128  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8129  */
   8130 static int
   8131 wm_gmii_mediachange(struct ifnet *ifp)
   8132 {
   8133 	struct wm_softc *sc = ifp->if_softc;
   8134 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8135 	int rc;
   8136 
   8137 	if ((ifp->if_flags & IFF_UP) == 0)
   8138 		return 0;
   8139 
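         	/*
         	 * Always set "link up" (SLU).  With autonegotiation, or on
         	 * chips newer than the 82543, let the hardware pick speed and
         	 * duplex; otherwise disable auto-speed detection and force
         	 * the selected speed and duplex below.
         	 */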
   8140 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8141 	sc->sc_ctrl |= CTRL_SLU;
   8142 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8143 	    || (sc->sc_type > WM_T_82543)) {
   8144 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8145 	} else {
   8146 		sc->sc_ctrl &= ~CTRL_ASDE;
   8147 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8148 		if (ife->ifm_media & IFM_FDX)
   8149 			sc->sc_ctrl |= CTRL_FD;
   8150 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8151 		case IFM_10_T:
   8152 			sc->sc_ctrl |= CTRL_SPEED_10;
   8153 			break;
   8154 		case IFM_100_TX:
   8155 			sc->sc_ctrl |= CTRL_SPEED_100;
   8156 			break;
   8157 		case IFM_1000_T:
   8158 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8159 			break;
   8160 		default:
   8161 			panic("wm_gmii_mediachange: bad media 0x%x",
   8162 			    ife->ifm_media);
   8163 		}
   8164 	}
   8165 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8166 	if (sc->sc_type <= WM_T_82543)
   8167 		wm_gmii_reset(sc);
   8168 
   8169 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8170 		return 0;
   8171 	return rc;
   8172 }
   8173 
   8174 /*
   8175  * wm_gmii_mediastatus:	[ifmedia interface function]
   8176  *
   8177  *	Get the current interface media status on a 1000BASE-T device.
   8178  */
   8179 static void
   8180 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8181 {
   8182 	struct wm_softc *sc = ifp->if_softc;
   8183 
   8184 	ether_mediastatus(ifp, ifmr);
   8185 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8186 	    | sc->sc_flowflags;
   8187 }
   8188 
   8189 #define	MDI_IO		CTRL_SWDPIN(2)
   8190 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8191 #define	MDI_CLK		CTRL_SWDPIN(3)
   8192 
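         /*
          * On the i82543 the MII management interface is bit-banged through
          * the software-definable pins: MDI_DIR sets the data direction,
          * MDI_IO carries each data bit, and MDI_CLK is toggled to clock it
          * in or out, with short settling delays between transitions.
          */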
   8193 static void
   8194 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8195 {
   8196 	uint32_t i, v;
   8197 
   8198 	v = CSR_READ(sc, WMREG_CTRL);
   8199 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8200 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8201 
   8202 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8203 		if (data & i)
   8204 			v |= MDI_IO;
   8205 		else
   8206 			v &= ~MDI_IO;
   8207 		CSR_WRITE(sc, WMREG_CTRL, v);
   8208 		CSR_WRITE_FLUSH(sc);
   8209 		delay(10);
   8210 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8211 		CSR_WRITE_FLUSH(sc);
   8212 		delay(10);
   8213 		CSR_WRITE(sc, WMREG_CTRL, v);
   8214 		CSR_WRITE_FLUSH(sc);
   8215 		delay(10);
   8216 	}
   8217 }
   8218 
   8219 static uint32_t
   8220 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8221 {
   8222 	uint32_t v, i, data = 0;
   8223 
   8224 	v = CSR_READ(sc, WMREG_CTRL);
   8225 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8226 	v |= CTRL_SWDPIO(3);
   8227 
   8228 	CSR_WRITE(sc, WMREG_CTRL, v);
   8229 	CSR_WRITE_FLUSH(sc);
   8230 	delay(10);
   8231 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8232 	CSR_WRITE_FLUSH(sc);
   8233 	delay(10);
   8234 	CSR_WRITE(sc, WMREG_CTRL, v);
   8235 	CSR_WRITE_FLUSH(sc);
   8236 	delay(10);
   8237 
   8238 	for (i = 0; i < 16; i++) {
   8239 		data <<= 1;
   8240 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8241 		CSR_WRITE_FLUSH(sc);
   8242 		delay(10);
   8243 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8244 			data |= 1;
   8245 		CSR_WRITE(sc, WMREG_CTRL, v);
   8246 		CSR_WRITE_FLUSH(sc);
   8247 		delay(10);
   8248 	}
   8249 
   8250 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8251 	CSR_WRITE_FLUSH(sc);
   8252 	delay(10);
   8253 	CSR_WRITE(sc, WMREG_CTRL, v);
   8254 	CSR_WRITE_FLUSH(sc);
   8255 	delay(10);
   8256 
   8257 	return data;
   8258 }
   8259 
   8260 #undef MDI_IO
   8261 #undef MDI_DIR
   8262 #undef MDI_CLK
   8263 
   8264 /*
   8265  * wm_gmii_i82543_readreg:	[mii interface function]
   8266  *
   8267  *	Read a PHY register on the GMII (i82543 version).
   8268  */
   8269 static int
   8270 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8271 {
   8272 	struct wm_softc *sc = device_private(self);
   8273 	int rv;
   8274 
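         	/* Send a 32-bit preamble, then the 14-bit read command frame. */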
   8275 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8276 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8277 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8278 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8279 
   8280 	DPRINTF(WM_DEBUG_GMII,
   8281 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8282 	    device_xname(sc->sc_dev), phy, reg, rv));
   8283 
   8284 	return rv;
   8285 }
   8286 
   8287 /*
   8288  * wm_gmii_i82543_writereg:	[mii interface function]
   8289  *
   8290  *	Write a PHY register on the GMII (i82543 version).
   8291  */
   8292 static void
   8293 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8294 {
   8295 	struct wm_softc *sc = device_private(self);
   8296 
   8297 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8298 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8299 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8300 	    (MII_COMMAND_START << 30), 32);
   8301 }
   8302 
   8303 /*
   8304  * wm_gmii_i82544_readreg:	[mii interface function]
   8305  *
   8306  *	Read a PHY register on the GMII.
   8307  */
   8308 static int
   8309 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8310 {
   8311 	struct wm_softc *sc = device_private(self);
   8312 	uint32_t mdic = 0;
   8313 	int i, rv;
   8314 
   8315 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8316 	    MDIC_REGADD(reg));
   8317 
   8318 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8319 		mdic = CSR_READ(sc, WMREG_MDIC);
   8320 		if (mdic & MDIC_READY)
   8321 			break;
   8322 		delay(50);
   8323 	}
   8324 
   8325 	if ((mdic & MDIC_READY) == 0) {
   8326 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8327 		    device_xname(sc->sc_dev), phy, reg);
   8328 		rv = 0;
   8329 	} else if (mdic & MDIC_E) {
   8330 #if 0 /* This is normal if no PHY is present. */
   8331 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8332 		    device_xname(sc->sc_dev), phy, reg);
   8333 #endif
   8334 		rv = 0;
   8335 	} else {
   8336 		rv = MDIC_DATA(mdic);
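         		/* All-ones data usually means no PHY responded; return 0. */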
   8337 		if (rv == 0xffff)
   8338 			rv = 0;
   8339 	}
   8340 
   8341 	return rv;
   8342 }
   8343 
   8344 /*
   8345  * wm_gmii_i82544_writereg:	[mii interface function]
   8346  *
   8347  *	Write a PHY register on the GMII.
   8348  */
   8349 static void
   8350 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8351 {
   8352 	struct wm_softc *sc = device_private(self);
   8353 	uint32_t mdic = 0;
   8354 	int i;
   8355 
   8356 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8357 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8358 
   8359 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8360 		mdic = CSR_READ(sc, WMREG_MDIC);
   8361 		if (mdic & MDIC_READY)
   8362 			break;
   8363 		delay(50);
   8364 	}
   8365 
   8366 	if ((mdic & MDIC_READY) == 0)
   8367 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8368 		    device_xname(sc->sc_dev), phy, reg);
   8369 	else if (mdic & MDIC_E)
   8370 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8371 		    device_xname(sc->sc_dev), phy, reg);
   8372 }
   8373 
   8374 /*
   8375  * wm_gmii_i80003_readreg:	[mii interface function]
   8376  *
    8377  *	Read a PHY register on the kumeran bus (80003).
    8378  * This could be handled by the PHY layer if we didn't have to lock the
    8379  * resource ...
   8380  */
   8381 static int
   8382 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8383 {
   8384 	struct wm_softc *sc = device_private(self);
   8385 	int sem;
   8386 	int rv;
   8387 
   8388 	if (phy != 1) /* only one PHY on kumeran bus */
   8389 		return 0;
   8390 
   8391 	sem = swfwphysem[sc->sc_funcid];
   8392 	if (wm_get_swfw_semaphore(sc, sem)) {
   8393 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8394 		    __func__);
   8395 		return 0;
   8396 	}
   8397 
   8398 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8399 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8400 		    reg >> GG82563_PAGE_SHIFT);
   8401 	} else {
   8402 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8403 		    reg >> GG82563_PAGE_SHIFT);
   8404 	}
    8405 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8406 	delay(200);
   8407 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8408 	delay(200);
   8409 
   8410 	wm_put_swfw_semaphore(sc, sem);
   8411 	return rv;
   8412 }
   8413 
   8414 /*
   8415  * wm_gmii_i80003_writereg:	[mii interface function]
   8416  *
    8417  *	Write a PHY register on the kumeran bus (80003).
    8418  * This could be handled by the PHY layer if we didn't have to lock the
    8419  * resource ...
   8420  */
   8421 static void
   8422 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8423 {
   8424 	struct wm_softc *sc = device_private(self);
   8425 	int sem;
   8426 
   8427 	if (phy != 1) /* only one PHY on kumeran bus */
   8428 		return;
   8429 
   8430 	sem = swfwphysem[sc->sc_funcid];
   8431 	if (wm_get_swfw_semaphore(sc, sem)) {
   8432 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8433 		    __func__);
   8434 		return;
   8435 	}
   8436 
   8437 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8438 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8439 		    reg >> GG82563_PAGE_SHIFT);
   8440 	} else {
   8441 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8442 		    reg >> GG82563_PAGE_SHIFT);
   8443 	}
    8444 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8445 	delay(200);
   8446 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8447 	delay(200);
   8448 
   8449 	wm_put_swfw_semaphore(sc, sem);
   8450 }
   8451 
   8452 /*
   8453  * wm_gmii_bm_readreg:	[mii interface function]
   8454  *
    8455  *	Read a PHY register on the BM PHY (82567).
    8456  * This could be handled by the PHY layer if we didn't have to lock the
    8457  * resource ...
   8458  */
   8459 static int
   8460 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8461 {
   8462 	struct wm_softc *sc = device_private(self);
   8463 	int sem;
   8464 	int rv;
   8465 
   8466 	sem = swfwphysem[sc->sc_funcid];
   8467 	if (wm_get_swfw_semaphore(sc, sem)) {
   8468 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8469 		    __func__);
   8470 		return 0;
   8471 	}
   8472 
   8473 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8474 		if (phy == 1)
   8475 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8476 			    reg);
   8477 		else
   8478 			wm_gmii_i82544_writereg(self, phy,
   8479 			    GG82563_PHY_PAGE_SELECT,
   8480 			    reg >> GG82563_PAGE_SHIFT);
   8481 	}
   8482 
   8483 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8484 	wm_put_swfw_semaphore(sc, sem);
   8485 	return rv;
   8486 }
   8487 
   8488 /*
   8489  * wm_gmii_bm_writereg:	[mii interface function]
   8490  *
    8491  *	Write a PHY register on the BM PHY (82567).
    8492  * This could be handled by the PHY layer if we didn't have to lock the
    8493  * resource ...
   8494  */
   8495 static void
   8496 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8497 {
   8498 	struct wm_softc *sc = device_private(self);
   8499 	int sem;
   8500 
   8501 	sem = swfwphysem[sc->sc_funcid];
   8502 	if (wm_get_swfw_semaphore(sc, sem)) {
   8503 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8504 		    __func__);
   8505 		return;
   8506 	}
   8507 
   8508 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8509 		if (phy == 1)
   8510 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8511 			    reg);
   8512 		else
   8513 			wm_gmii_i82544_writereg(self, phy,
   8514 			    GG82563_PHY_PAGE_SELECT,
   8515 			    reg >> GG82563_PAGE_SHIFT);
   8516 	}
   8517 
   8518 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8519 	wm_put_swfw_semaphore(sc, sem);
   8520 }
   8521 
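         /*
          * wm_access_phy_wakeup_reg_bm:
          *
          *	Read or write a BM PHY wakeup register (page 800).  Host
          *	wakeup access is first enabled on page 769, the register is
          *	then accessed through the address/data opcode registers on
          *	page 800, and the original enable state is restored afterwards.
          */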
   8522 static void
   8523 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8524 {
   8525 	struct wm_softc *sc = device_private(self);
   8526 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8527 	uint16_t wuce;
   8528 
   8529 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8530 	if (sc->sc_type == WM_T_PCH) {
    8531 		/* XXX the e1000 driver does nothing here... why? */
   8532 	}
   8533 
   8534 	/* Set page 769 */
   8535 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8536 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8537 
   8538 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8539 
   8540 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8541 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8542 	    wuce | BM_WUC_ENABLE_BIT);
   8543 
   8544 	/* Select page 800 */
   8545 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8546 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8547 
   8548 	/* Write page 800 */
   8549 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8550 
   8551 	if (rd)
   8552 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8553 	else
   8554 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8555 
   8556 	/* Set page 769 */
   8557 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8558 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8559 
   8560 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8561 }
   8562 
   8563 /*
   8564  * wm_gmii_hv_readreg:	[mii interface function]
   8565  *
    8566  *	Read a PHY register on the HV (PCH family) PHY.
    8567  * This could be handled by the PHY layer if we didn't have to lock the
    8568  * resource ...
   8569  */
   8570 static int
   8571 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8572 {
   8573 	struct wm_softc *sc = device_private(self);
   8574 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8575 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8576 	uint16_t val;
   8577 	int rv;
   8578 
   8579 	if (wm_get_swfwhw_semaphore(sc)) {
   8580 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8581 		    __func__);
   8582 		return 0;
   8583 	}
   8584 
   8585 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8586 	if (sc->sc_phytype == WMPHY_82577) {
   8587 		/* XXX must write */
   8588 	}
   8589 
   8590 	/* Page 800 works differently than the rest so it has its own func */
   8591 	if (page == BM_WUC_PAGE) {
   8592 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8593 		return val;
   8594 	}
   8595 
   8596 	/*
    8597 	 * Pages below HV_INTC_FC_PAGE_START (768) work differently from
    8598 	 * the rest and are not handled here yet.
   8599 	 */
   8600 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8601 		printf("gmii_hv_readreg!!!\n");
   8602 		return 0;
   8603 	}
   8604 
   8605 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8606 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8607 		    page << BME1000_PAGE_SHIFT);
   8608 	}
   8609 
   8610 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8611 	wm_put_swfwhw_semaphore(sc);
   8612 	return rv;
   8613 }
   8614 
   8615 /*
   8616  * wm_gmii_hv_writereg:	[mii interface function]
   8617  *
    8618  *	Write a PHY register on the HV (PCH family) PHY.
    8619  * This could be handled by the PHY layer if we didn't have to lock the
    8620  * resource ...
   8621  */
   8622 static void
   8623 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8624 {
   8625 	struct wm_softc *sc = device_private(self);
   8626 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8627 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8628 
   8629 	if (wm_get_swfwhw_semaphore(sc)) {
   8630 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8631 		    __func__);
   8632 		return;
   8633 	}
   8634 
   8635 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8636 
   8637 	/* Page 800 works differently than the rest so it has its own func */
   8638 	if (page == BM_WUC_PAGE) {
   8639 		uint16_t tmp;
   8640 
   8641 		tmp = val;
   8642 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8643 		return;
   8644 	}
   8645 
   8646 	/*
    8647 	 * Pages below HV_INTC_FC_PAGE_START (768) work differently from
    8648 	 * the rest and are not handled here yet.
   8649 	 */
   8650 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8651 		printf("gmii_hv_writereg!!!\n");
   8652 		return;
   8653 	}
   8654 
   8655 	/*
   8656 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8657 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8658 	 */
   8659 
   8660 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8661 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8662 		    page << BME1000_PAGE_SHIFT);
   8663 	}
   8664 
   8665 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8666 	wm_put_swfwhw_semaphore(sc);
   8667 }
   8668 
   8669 /*
   8670  * wm_gmii_82580_readreg:	[mii interface function]
   8671  *
   8672  *	Read a PHY register on the 82580 and I350.
   8673  * This could be handled by the PHY layer if we didn't have to lock the
    8674  * resource ...
   8675  */
   8676 static int
   8677 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8678 {
   8679 	struct wm_softc *sc = device_private(self);
   8680 	int sem;
   8681 	int rv;
   8682 
   8683 	sem = swfwphysem[sc->sc_funcid];
   8684 	if (wm_get_swfw_semaphore(sc, sem)) {
   8685 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8686 		    __func__);
   8687 		return 0;
   8688 	}
   8689 
   8690 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8691 
   8692 	wm_put_swfw_semaphore(sc, sem);
   8693 	return rv;
   8694 }
   8695 
   8696 /*
   8697  * wm_gmii_82580_writereg:	[mii interface function]
   8698  *
   8699  *	Write a PHY register on the 82580 and I350.
   8700  * This could be handled by the PHY layer if we didn't have to lock the
    8701  * resource ...
   8702  */
   8703 static void
   8704 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8705 {
   8706 	struct wm_softc *sc = device_private(self);
   8707 	int sem;
   8708 
   8709 	sem = swfwphysem[sc->sc_funcid];
   8710 	if (wm_get_swfw_semaphore(sc, sem)) {
   8711 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8712 		    __func__);
   8713 		return;
   8714 	}
   8715 
   8716 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8717 
   8718 	wm_put_swfw_semaphore(sc, sem);
   8719 }
   8720 
   8721 /*
   8722  * wm_gmii_gs40g_readreg:	[mii interface function]
   8723  *
    8724  *	Read a PHY register on the I210 and I211.
    8725  * This could be handled by the PHY layer if we didn't have to lock the
    8726  * resource ...
   8727  */
   8728 static int
   8729 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8730 {
   8731 	struct wm_softc *sc = device_private(self);
   8732 	int sem;
   8733 	int page, offset;
   8734 	int rv;
   8735 
   8736 	/* Acquire semaphore */
   8737 	sem = swfwphysem[sc->sc_funcid];
   8738 	if (wm_get_swfw_semaphore(sc, sem)) {
   8739 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8740 		    __func__);
   8741 		return 0;
   8742 	}
   8743 
   8744 	/* Page select */
   8745 	page = reg >> GS40G_PAGE_SHIFT;
   8746 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8747 
   8748 	/* Read reg */
   8749 	offset = reg & GS40G_OFFSET_MASK;
   8750 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8751 
   8752 	wm_put_swfw_semaphore(sc, sem);
   8753 	return rv;
   8754 }
   8755 
   8756 /*
   8757  * wm_gmii_gs40g_writereg:	[mii interface function]
   8758  *
   8759  *	Write a PHY register on the I210 and I211.
   8760  * This could be handled by the PHY layer if we didn't have to lock the
    8761  * resource ...
   8762  */
   8763 static void
   8764 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8765 {
   8766 	struct wm_softc *sc = device_private(self);
   8767 	int sem;
   8768 	int page, offset;
   8769 
   8770 	/* Acquire semaphore */
   8771 	sem = swfwphysem[sc->sc_funcid];
   8772 	if (wm_get_swfw_semaphore(sc, sem)) {
   8773 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8774 		    __func__);
   8775 		return;
   8776 	}
   8777 
   8778 	/* Page select */
   8779 	page = reg >> GS40G_PAGE_SHIFT;
   8780 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8781 
   8782 	/* Write reg */
   8783 	offset = reg & GS40G_OFFSET_MASK;
   8784 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8785 
   8786 	/* Release semaphore */
   8787 	wm_put_swfw_semaphore(sc, sem);
   8788 }
   8789 
   8790 /*
   8791  * wm_gmii_statchg:	[mii interface function]
   8792  *
   8793  *	Callback from MII layer when media changes.
   8794  */
   8795 static void
   8796 wm_gmii_statchg(struct ifnet *ifp)
   8797 {
   8798 	struct wm_softc *sc = ifp->if_softc;
   8799 	struct mii_data *mii = &sc->sc_mii;
   8800 
   8801 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8802 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8803 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8804 
   8805 	/*
   8806 	 * Get flow control negotiation result.
   8807 	 */
   8808 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8809 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8810 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8811 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8812 	}
   8813 
   8814 	if (sc->sc_flowflags & IFM_FLOW) {
   8815 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8816 			sc->sc_ctrl |= CTRL_TFCE;
   8817 			sc->sc_fcrtl |= FCRTL_XONE;
   8818 		}
   8819 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8820 			sc->sc_ctrl |= CTRL_RFCE;
   8821 	}
   8822 
   8823 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8824 		DPRINTF(WM_DEBUG_LINK,
   8825 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8826 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8827 	} else {
   8828 		DPRINTF(WM_DEBUG_LINK,
   8829 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8830 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8831 	}
   8832 
   8833 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8834 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8835 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8836 						 : WMREG_FCRTL, sc->sc_fcrtl);
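         	/*
         	 * The 80003 also needs its kumeran half-duplex control and
         	 * inter-packet gap (TIPG) adjusted for the negotiated speed.
         	 */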
   8837 	if (sc->sc_type == WM_T_80003) {
   8838 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8839 		case IFM_1000_T:
   8840 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8841 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8842 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8843 			break;
   8844 		default:
   8845 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8846 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8847 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8848 			break;
   8849 		}
   8850 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8851 	}
   8852 }
   8853 
   8854 /*
   8855  * wm_kmrn_readreg:
   8856  *
   8857  *	Read a kumeran register
   8858  */
   8859 static int
   8860 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8861 {
   8862 	int rv;
   8863 
   8864 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8865 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8866 			aprint_error_dev(sc->sc_dev,
   8867 			    "%s: failed to get semaphore\n", __func__);
   8868 			return 0;
   8869 		}
   8870 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8871 		if (wm_get_swfwhw_semaphore(sc)) {
   8872 			aprint_error_dev(sc->sc_dev,
   8873 			    "%s: failed to get semaphore\n", __func__);
   8874 			return 0;
   8875 		}
   8876 	}
   8877 
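         	/* Write the register offset with the read-enable (REN) bit set. */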
   8878 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8879 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8880 	    KUMCTRLSTA_REN);
   8881 	CSR_WRITE_FLUSH(sc);
   8882 	delay(2);
   8883 
   8884 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   8885 
   8886 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8887 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8888 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8889 		wm_put_swfwhw_semaphore(sc);
   8890 
   8891 	return rv;
   8892 }
   8893 
   8894 /*
   8895  * wm_kmrn_writereg:
   8896  *
   8897  *	Write a kumeran register
   8898  */
   8899 static void
   8900 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   8901 {
   8902 
   8903 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8904 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8905 			aprint_error_dev(sc->sc_dev,
   8906 			    "%s: failed to get semaphore\n", __func__);
   8907 			return;
   8908 		}
   8909 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8910 		if (wm_get_swfwhw_semaphore(sc)) {
   8911 			aprint_error_dev(sc->sc_dev,
   8912 			    "%s: failed to get semaphore\n", __func__);
   8913 			return;
   8914 		}
   8915 	}
   8916 
   8917 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8918 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8919 	    (val & KUMCTRLSTA_MASK));
   8920 
   8921 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8922 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8923 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8924 		wm_put_swfwhw_semaphore(sc);
   8925 }
   8926 
   8927 /* SGMII related */
   8928 
   8929 /*
   8930  * wm_sgmii_uses_mdio
   8931  *
   8932  * Check whether the transaction is to the internal PHY or the external
   8933  * MDIO interface. Return true if it's MDIO.
   8934  */
   8935 static bool
   8936 wm_sgmii_uses_mdio(struct wm_softc *sc)
   8937 {
   8938 	uint32_t reg;
   8939 	bool ismdio = false;
   8940 
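         	/*
         	 * The destination bit in MDIC (82575/82576) or MDICNFG (82580
         	 * and later) selects the external MDIO interface.
         	 */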
   8941 	switch (sc->sc_type) {
   8942 	case WM_T_82575:
   8943 	case WM_T_82576:
   8944 		reg = CSR_READ(sc, WMREG_MDIC);
   8945 		ismdio = ((reg & MDIC_DEST) != 0);
   8946 		break;
   8947 	case WM_T_82580:
   8948 	case WM_T_I350:
   8949 	case WM_T_I354:
   8950 	case WM_T_I210:
   8951 	case WM_T_I211:
   8952 		reg = CSR_READ(sc, WMREG_MDICNFG);
   8953 		ismdio = ((reg & MDICNFG_DEST) != 0);
   8954 		break;
   8955 	default:
   8956 		break;
   8957 	}
   8958 
   8959 	return ismdio;
   8960 }
   8961 
   8962 /*
   8963  * wm_sgmii_readreg:	[mii interface function]
   8964  *
   8965  *	Read a PHY register on the SGMII
   8966  * This could be handled by the PHY layer if we didn't have to lock the
    8967  * resource ...
   8968  */
   8969 static int
   8970 wm_sgmii_readreg(device_t self, int phy, int reg)
   8971 {
   8972 	struct wm_softc *sc = device_private(self);
   8973 	uint32_t i2ccmd;
   8974 	int i, rv;
   8975 
   8976 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8977 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8978 		    __func__);
   8979 		return 0;
   8980 	}
   8981 
   8982 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8983 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8984 	    | I2CCMD_OPCODE_READ;
   8985 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8986 
   8987 	/* Poll the ready bit */
   8988 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8989 		delay(50);
   8990 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8991 		if (i2ccmd & I2CCMD_READY)
   8992 			break;
   8993 	}
   8994 	if ((i2ccmd & I2CCMD_READY) == 0)
   8995 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   8996 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   8997 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   8998 
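         	/* Swap the data bytes back from I2C byte order (cf. writereg). */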
   8999 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9000 
   9001 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9002 	return rv;
   9003 }
   9004 
   9005 /*
   9006  * wm_sgmii_writereg:	[mii interface function]
   9007  *
   9008  *	Write a PHY register on the SGMII.
   9009  * This could be handled by the PHY layer if we didn't have to lock the
    9010  * resource ...
   9011  */
   9012 static void
   9013 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9014 {
   9015 	struct wm_softc *sc = device_private(self);
   9016 	uint32_t i2ccmd;
   9017 	int i;
   9018 	int val_swapped;
   9019 
   9020 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9021 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9022 		    __func__);
   9023 		return;
   9024 	}
   9025 	/* Swap the data bytes for the I2C interface */
   9026 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9027 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9028 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9029 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9030 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9031 
   9032 	/* Poll the ready bit */
   9033 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9034 		delay(50);
   9035 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9036 		if (i2ccmd & I2CCMD_READY)
   9037 			break;
   9038 	}
   9039 	if ((i2ccmd & I2CCMD_READY) == 0)
   9040 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9041 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9042 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9043 
    9044 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9045 }
   9046 
   9047 /* TBI related */
   9048 
   9049 /*
   9050  * wm_tbi_mediainit:
   9051  *
   9052  *	Initialize media for use on 1000BASE-X devices.
   9053  */
   9054 static void
   9055 wm_tbi_mediainit(struct wm_softc *sc)
   9056 {
   9057 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9058 	const char *sep = "";
   9059 
   9060 	if (sc->sc_type < WM_T_82543)
   9061 		sc->sc_tipg = TIPG_WM_DFLT;
   9062 	else
   9063 		sc->sc_tipg = TIPG_LG_DFLT;
   9064 
   9065 	sc->sc_tbi_serdes_anegticks = 5;
   9066 
   9067 	/* Initialize our media structures */
   9068 	sc->sc_mii.mii_ifp = ifp;
   9069 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9070 
   9071 	if ((sc->sc_type >= WM_T_82575)
   9072 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9073 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9074 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9075 	else
   9076 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9077 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9078 
   9079 	/*
   9080 	 * SWD Pins:
   9081 	 *
   9082 	 *	0 = Link LED (output)
   9083 	 *	1 = Loss Of Signal (input)
   9084 	 */
   9085 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9086 
   9087 	/* XXX Perhaps this is only for TBI */
   9088 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9089 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9090 
   9091 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9092 		sc->sc_ctrl &= ~CTRL_LRST;
   9093 
   9094 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9095 
   9096 #define	ADD(ss, mm, dd)							\
   9097 do {									\
   9098 	aprint_normal("%s%s", sep, ss);					\
   9099 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   9100 	sep = ", ";							\
   9101 } while (/*CONSTCOND*/0)
   9102 
   9103 	aprint_normal_dev(sc->sc_dev, "");
   9104 
   9105 	/* Only 82545 is LX */
   9106 	if (sc->sc_type == WM_T_82545) {
   9107 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9108 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
   9109 	} else {
   9110 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9111 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   9112 	}
   9113 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   9114 	aprint_normal("\n");
   9115 
   9116 #undef ADD
   9117 
   9118 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9119 }
   9120 
   9121 /*
   9122  * wm_tbi_mediachange:	[ifmedia interface function]
   9123  *
   9124  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9125  */
   9126 static int
   9127 wm_tbi_mediachange(struct ifnet *ifp)
   9128 {
   9129 	struct wm_softc *sc = ifp->if_softc;
   9130 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9131 	uint32_t status;
   9132 	int i;
   9133 
   9134 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9135 		/* XXX need some work for >= 82571 and < 82575 */
   9136 		if (sc->sc_type < WM_T_82575)
   9137 			return 0;
   9138 	}
   9139 
   9140 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9141 	    || (sc->sc_type >= WM_T_82575))
   9142 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9143 
   9144 	sc->sc_ctrl &= ~CTRL_LRST;
   9145 	sc->sc_txcw = TXCW_ANE;
   9146 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9147 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9148 	else if (ife->ifm_media & IFM_FDX)
   9149 		sc->sc_txcw |= TXCW_FD;
   9150 	else
   9151 		sc->sc_txcw |= TXCW_HD;
   9152 
   9153 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9154 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9155 
   9156 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9157 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9158 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9159 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9160 	CSR_WRITE_FLUSH(sc);
   9161 	delay(1000);
   9162 
   9163 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9164 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9165 
    9166 	/*
    9167 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
    9168 	 * optics detect a signal; on older chips a signal reads as 0.
    9169 	 */
   9170 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9171 		/* Have signal; wait for the link to come up. */
   9172 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9173 			delay(10000);
   9174 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9175 				break;
   9176 		}
   9177 
   9178 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9179 			    device_xname(sc->sc_dev),i));
   9180 
   9181 		status = CSR_READ(sc, WMREG_STATUS);
   9182 		DPRINTF(WM_DEBUG_LINK,
   9183 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9184 			device_xname(sc->sc_dev),status, STATUS_LU));
   9185 		if (status & STATUS_LU) {
   9186 			/* Link is up. */
   9187 			DPRINTF(WM_DEBUG_LINK,
   9188 			    ("%s: LINK: set media -> link up %s\n",
   9189 			    device_xname(sc->sc_dev),
   9190 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9191 
   9192 			/*
   9193 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9194 			 * so we should update sc->sc_ctrl
   9195 			 */
   9196 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9197 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9198 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9199 			if (status & STATUS_FD)
   9200 				sc->sc_tctl |=
   9201 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9202 			else
   9203 				sc->sc_tctl |=
   9204 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9205 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9206 				sc->sc_fcrtl |= FCRTL_XONE;
   9207 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9208 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9209 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9210 				      sc->sc_fcrtl);
   9211 			sc->sc_tbi_linkup = 1;
   9212 		} else {
   9213 			if (i == WM_LINKUP_TIMEOUT)
   9214 				wm_check_for_link(sc);
   9215 			/* Link is down. */
   9216 			DPRINTF(WM_DEBUG_LINK,
   9217 			    ("%s: LINK: set media -> link down\n",
   9218 			    device_xname(sc->sc_dev)));
   9219 			sc->sc_tbi_linkup = 0;
   9220 		}
   9221 	} else {
   9222 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9223 		    device_xname(sc->sc_dev)));
   9224 		sc->sc_tbi_linkup = 0;
   9225 	}
   9226 
   9227 	wm_tbi_serdes_set_linkled(sc);
   9228 
   9229 	return 0;
   9230 }
   9231 
   9232 /*
   9233  * wm_tbi_mediastatus:	[ifmedia interface function]
   9234  *
   9235  *	Get the current interface media status on a 1000BASE-X device.
   9236  */
   9237 static void
   9238 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9239 {
   9240 	struct wm_softc *sc = ifp->if_softc;
   9241 	uint32_t ctrl, status;
   9242 
   9243 	ifmr->ifm_status = IFM_AVALID;
   9244 	ifmr->ifm_active = IFM_ETHER;
   9245 
   9246 	status = CSR_READ(sc, WMREG_STATUS);
   9247 	if ((status & STATUS_LU) == 0) {
   9248 		ifmr->ifm_active |= IFM_NONE;
   9249 		return;
   9250 	}
   9251 
   9252 	ifmr->ifm_status |= IFM_ACTIVE;
   9253 	/* Only 82545 is LX */
   9254 	if (sc->sc_type == WM_T_82545)
   9255 		ifmr->ifm_active |= IFM_1000_LX;
   9256 	else
   9257 		ifmr->ifm_active |= IFM_1000_SX;
   9258 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9259 		ifmr->ifm_active |= IFM_FDX;
   9260 	else
   9261 		ifmr->ifm_active |= IFM_HDX;
   9262 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9263 	if (ctrl & CTRL_RFCE)
   9264 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9265 	if (ctrl & CTRL_TFCE)
   9266 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9267 }
   9268 
   9269 /* XXX TBI only */
   9270 static int
   9271 wm_check_for_link(struct wm_softc *sc)
   9272 {
   9273 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9274 	uint32_t rxcw;
   9275 	uint32_t ctrl;
   9276 	uint32_t status;
   9277 	uint32_t sig;
   9278 
   9279 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9280 		/* XXX need some work for >= 82571 */
   9281 		if (sc->sc_type >= WM_T_82571) {
   9282 			sc->sc_tbi_linkup = 1;
   9283 			return 0;
   9284 		}
   9285 	}
   9286 
   9287 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9288 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9289 	status = CSR_READ(sc, WMREG_STATUS);
   9290 
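         	/*
         	 * Expected "signal present" value of SWDPIN(1): 1 on chips
         	 * newer than the 82544, 0 on older ones.
         	 */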
   9291 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9292 
   9293 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9294 		device_xname(sc->sc_dev), __func__,
   9295 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9296 		((status & STATUS_LU) != 0),
   9297 		((rxcw & RXCW_C) != 0)
   9298 		    ));
   9299 
   9300 	/*
   9301 	 * SWDPIN   LU RXCW
   9302 	 *      0    0    0
   9303 	 *      0    0    1	(should not happen)
   9304 	 *      0    1    0	(should not happen)
   9305 	 *      0    1    1	(should not happen)
   9306 	 *      1    0    0	Disable autonego and force linkup
   9307 	 *      1    0    1	got /C/ but not linkup yet
   9308 	 *      1    1    0	(linkup)
   9309 	 *      1    1    1	If IFM_AUTO, back to autonego
   9310 	 *
   9311 	 */
   9312 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9313 	    && ((status & STATUS_LU) == 0)
   9314 	    && ((rxcw & RXCW_C) == 0)) {
   9315 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9316 			__func__));
   9317 		sc->sc_tbi_linkup = 0;
   9318 		/* Disable auto-negotiation in the TXCW register */
   9319 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9320 
   9321 		/*
   9322 		 * Force link-up and also force full-duplex.
   9323 		 *
    9324 		 * NOTE: the hardware updates CTRL's TFCE and RFCE bits
    9325 		 * automatically, so we should update sc->sc_ctrl.
   9326 		 */
   9327 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9328 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9329 	} else if (((status & STATUS_LU) != 0)
   9330 	    && ((rxcw & RXCW_C) != 0)
   9331 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9332 		sc->sc_tbi_linkup = 1;
   9333 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9334 			__func__));
   9335 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9336 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9337 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9338 	    && ((rxcw & RXCW_C) != 0)) {
   9339 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9340 	} else {
   9341 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9342 			status));
   9343 	}
   9344 
   9345 	return 0;
   9346 }
   9347 
   9348 /*
   9349  * wm_tbi_tick:
   9350  *
   9351  *	Check the link on TBI devices.
   9352  *	This function acts as mii_tick().
   9353  */
   9354 static void
   9355 wm_tbi_tick(struct wm_softc *sc)
   9356 {
   9357 	struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
   9358 	struct mii_data *mii = &sc->sc_mii;
   9359 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9360 	uint32_t status;
   9361 
   9362 	KASSERT(WM_TX_LOCKED(txq));
   9363 
   9364 	status = CSR_READ(sc, WMREG_STATUS);
   9365 
   9366 	/* XXX is this needed? */
   9367 	(void)CSR_READ(sc, WMREG_RXCW);
   9368 	(void)CSR_READ(sc, WMREG_CTRL);
   9369 
   9370 	/* set link status */
   9371 	if ((status & STATUS_LU) == 0) {
   9372 		DPRINTF(WM_DEBUG_LINK,
   9373 		    ("%s: LINK: checklink -> down\n",
   9374 			device_xname(sc->sc_dev)));
   9375 		sc->sc_tbi_linkup = 0;
   9376 	} else if (sc->sc_tbi_linkup == 0) {
   9377 		DPRINTF(WM_DEBUG_LINK,
   9378 		    ("%s: LINK: checklink -> up %s\n",
   9379 			device_xname(sc->sc_dev),
   9380 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9381 		sc->sc_tbi_linkup = 1;
   9382 		sc->sc_tbi_serdes_ticks = 0;
   9383 	}
   9384 
   9385 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9386 		goto setled;
   9387 
   9388 	if ((status & STATUS_LU) == 0) {
   9389 		sc->sc_tbi_linkup = 0;
   9390 		/* If the timer expired, retry autonegotiation */
   9391 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9392 		    && (++sc->sc_tbi_serdes_ticks
   9393 			>= sc->sc_tbi_serdes_anegticks)) {
   9394 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9395 			sc->sc_tbi_serdes_ticks = 0;
   9396 			/*
   9397 			 * Reset the link, and let autonegotiation do
   9398 			 * its thing
   9399 			 */
   9400 			sc->sc_ctrl |= CTRL_LRST;
   9401 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9402 			CSR_WRITE_FLUSH(sc);
   9403 			delay(1000);
   9404 			sc->sc_ctrl &= ~CTRL_LRST;
   9405 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9406 			CSR_WRITE_FLUSH(sc);
   9407 			delay(1000);
   9408 			CSR_WRITE(sc, WMREG_TXCW,
   9409 			    sc->sc_txcw & ~TXCW_ANE);
   9410 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9411 		}
   9412 	}
   9413 
   9414 setled:
   9415 	wm_tbi_serdes_set_linkled(sc);
   9416 }
   9417 
   9418 /* SERDES related */
   9419 static void
   9420 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9421 {
   9422 	uint32_t reg;
   9423 
   9424 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9425 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9426 		return;
   9427 
   9428 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9429 	reg |= PCS_CFG_PCS_EN;
   9430 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9431 
   9432 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9433 	reg &= ~CTRL_EXT_SWDPIN(3);
   9434 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9435 	CSR_WRITE_FLUSH(sc);
   9436 }
   9437 
   9438 static int
   9439 wm_serdes_mediachange(struct ifnet *ifp)
   9440 {
   9441 	struct wm_softc *sc = ifp->if_softc;
   9442 	bool pcs_autoneg = true; /* XXX */
   9443 	uint32_t ctrl_ext, pcs_lctl, reg;
   9444 
   9445 	/* XXX Currently, this function is not called on 8257[12] */
   9446 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9447 	    || (sc->sc_type >= WM_T_82575))
   9448 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9449 
   9450 	wm_serdes_power_up_link_82575(sc);
   9451 
   9452 	sc->sc_ctrl |= CTRL_SLU;
   9453 
   9454 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9455 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9456 
   9457 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9458 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9459 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9460 	case CTRL_EXT_LINK_MODE_SGMII:
   9461 		pcs_autoneg = true;
   9462 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9463 		break;
   9464 	case CTRL_EXT_LINK_MODE_1000KX:
   9465 		pcs_autoneg = false;
   9466 		/* FALLTHROUGH */
   9467 	default:
   9468 		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
   9469 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9470 				pcs_autoneg = false;
   9471 		}
   9472 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9473 		    | CTRL_FRCFDX;
   9474 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9475 	}
   9476 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9477 
   9478 	if (pcs_autoneg) {
   9479 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9480 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9481 
   9482 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
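         		/* Advertise both symmetric and asymmetric flow control. */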
   9483 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9484 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9485 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9486 	} else
   9487 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9488 
   9489 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   9490 
   9491 
   9492 	return 0;
   9493 }
   9494 
   9495 static void
   9496 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9497 {
   9498 	struct wm_softc *sc = ifp->if_softc;
   9499 	struct mii_data *mii = &sc->sc_mii;
   9500 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9501 	uint32_t pcs_adv, pcs_lpab, reg;
   9502 
   9503 	ifmr->ifm_status = IFM_AVALID;
   9504 	ifmr->ifm_active = IFM_ETHER;
   9505 
   9506 	/* Check PCS */
   9507 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9508 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9509 		ifmr->ifm_active |= IFM_NONE;
   9510 		sc->sc_tbi_linkup = 0;
   9511 		goto setled;
   9512 	}
   9513 
   9514 	sc->sc_tbi_linkup = 1;
   9515 	ifmr->ifm_status |= IFM_ACTIVE;
   9516 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9517 	if ((reg & PCS_LSTS_FDX) != 0)
   9518 		ifmr->ifm_active |= IFM_FDX;
   9519 	else
   9520 		ifmr->ifm_active |= IFM_HDX;
   9521 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9522 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9523 		/* Check flow */
   9524 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9525 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9526 			printf("XXX LINKOK but not ACOMP\n");
   9527 			goto setled;
   9528 		}
   9529 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9530 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
    9531 		printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
   9532 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9533 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9534 			mii->mii_media_active |= IFM_FLOW
   9535 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9536 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9537 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9538 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9539 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9540 			mii->mii_media_active |= IFM_FLOW
   9541 			    | IFM_ETH_TXPAUSE;
   9542 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9543 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9544 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9545 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9546 			mii->mii_media_active |= IFM_FLOW
   9547 			    | IFM_ETH_RXPAUSE;
   9548 		} else {
   9549 		}
   9550 	}
   9551 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9552 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9553 setled:
   9554 	wm_tbi_serdes_set_linkled(sc);
   9555 }
   9556 
   9557 /*
   9558  * wm_serdes_tick:
   9559  *
   9560  *	Check the link on serdes devices.
   9561  */
   9562 static void
   9563 wm_serdes_tick(struct wm_softc *sc)
   9564 {
   9565 	struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
   9566 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9567 	struct mii_data *mii = &sc->sc_mii;
   9568 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9569 	uint32_t reg;
   9570 
   9571 	KASSERT(WM_TX_LOCKED(txq));
   9572 
   9573 	mii->mii_media_status = IFM_AVALID;
   9574 	mii->mii_media_active = IFM_ETHER;
   9575 
   9576 	/* Check PCS */
   9577 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9578 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9579 		mii->mii_media_status |= IFM_ACTIVE;
   9580 		sc->sc_tbi_linkup = 1;
   9581 		sc->sc_tbi_serdes_ticks = 0;
   9582 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9583 		if ((reg & PCS_LSTS_FDX) != 0)
   9584 			mii->mii_media_active |= IFM_FDX;
   9585 		else
   9586 			mii->mii_media_active |= IFM_HDX;
   9587 	} else {
    9588 		mii->mii_media_active |= IFM_NONE;
   9589 		sc->sc_tbi_linkup = 0;
    9590 		/* If the timer expired, retry autonegotiation */
   9591 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9592 		    && (++sc->sc_tbi_serdes_ticks
   9593 			>= sc->sc_tbi_serdes_anegticks)) {
   9594 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9595 			sc->sc_tbi_serdes_ticks = 0;
   9596 			/* XXX */
   9597 			wm_serdes_mediachange(ifp);
   9598 		}
   9599 	}
   9600 
   9601 	wm_tbi_serdes_set_linkled(sc);
   9602 }
   9603 
   9604 /* SFP related */
   9605 
   9606 static int
   9607 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9608 {
   9609 	uint32_t i2ccmd;
   9610 	int i;
   9611 
   9612 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9613 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9614 
   9615 	/* Poll the ready bit */
   9616 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9617 		delay(50);
   9618 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9619 		if (i2ccmd & I2CCMD_READY)
   9620 			break;
   9621 	}
   9622 	if ((i2ccmd & I2CCMD_READY) == 0)
   9623 		return -1;
   9624 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9625 		return -1;
   9626 
   9627 	*data = i2ccmd & 0x00ff;
   9628 
   9629 	return 0;
   9630 }
   9631 
   9632 static uint32_t
   9633 wm_sfp_get_media_type(struct wm_softc *sc)
   9634 {
   9635 	uint32_t ctrl_ext;
   9636 	uint8_t val = 0;
   9637 	int timeout = 3;
   9638 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9639 	int rv = -1;
   9640 
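         	/*
         	 * Clearing SWDPIN(3) powers up the SFP cage (cf.
         	 * wm_gmii_mediainit) and I2C_ENA enables the I2C interface
         	 * to the module.
         	 */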
   9641 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9642 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9643 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9644 	CSR_WRITE_FLUSH(sc);
   9645 
   9646 	/* Read SFP module data */
   9647 	while (timeout) {
   9648 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9649 		if (rv == 0)
   9650 			break;
   9651 		delay(100*1000); /* XXX too big */
   9652 		timeout--;
   9653 	}
   9654 	if (rv != 0)
   9655 		goto out;
   9656 	switch (val) {
   9657 	case SFF_SFP_ID_SFF:
   9658 		aprint_normal_dev(sc->sc_dev,
   9659 		    "Module/Connector soldered to board\n");
   9660 		break;
   9661 	case SFF_SFP_ID_SFP:
   9662 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9663 		break;
   9664 	case SFF_SFP_ID_UNKNOWN:
   9665 		goto out;
   9666 	default:
   9667 		break;
   9668 	}
   9669 
   9670 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9671 	if (rv != 0) {
   9672 		goto out;
   9673 	}
   9674 
   9675 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9676 		mediatype = WM_MEDIATYPE_SERDES;
   9677 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   9678 		sc->sc_flags |= WM_F_SGMII;
   9679 		mediatype = WM_MEDIATYPE_COPPER;
   9680 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   9681 		sc->sc_flags |= WM_F_SGMII;
   9682 		mediatype = WM_MEDIATYPE_SERDES;
   9683 	}
   9684 
   9685 out:
   9686 	/* Restore I2C interface setting */
   9687 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9688 
   9689 	return mediatype;
    9690 }

    9691 /*
   9692  * NVM related.
   9693  * Microwire, SPI (w/wo EERD) and Flash.
   9694  */
   9695 
   9696 /* Both spi and uwire */
   9697 
   9698 /*
   9699  * wm_eeprom_sendbits:
   9700  *
   9701  *	Send a series of bits to the EEPROM.
   9702  */
   9703 static void
   9704 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9705 {
   9706 	uint32_t reg;
   9707 	int x;
   9708 
   9709 	reg = CSR_READ(sc, WMREG_EECD);
   9710 
   9711 	for (x = nbits; x > 0; x--) {
   9712 		if (bits & (1U << (x - 1)))
   9713 			reg |= EECD_DI;
   9714 		else
   9715 			reg &= ~EECD_DI;
   9716 		CSR_WRITE(sc, WMREG_EECD, reg);
   9717 		CSR_WRITE_FLUSH(sc);
   9718 		delay(2);
   9719 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9720 		CSR_WRITE_FLUSH(sc);
   9721 		delay(2);
   9722 		CSR_WRITE(sc, WMREG_EECD, reg);
   9723 		CSR_WRITE_FLUSH(sc);
   9724 		delay(2);
   9725 	}
   9726 }
   9727 
   9728 /*
   9729  * wm_eeprom_recvbits:
   9730  *
   9731  *	Receive a series of bits from the EEPROM.
   9732  */
   9733 static void
   9734 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9735 {
   9736 	uint32_t reg, val;
   9737 	int x;
   9738 
   9739 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9740 
   9741 	val = 0;
   9742 	for (x = nbits; x > 0; x--) {
   9743 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9744 		CSR_WRITE_FLUSH(sc);
   9745 		delay(2);
   9746 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9747 			val |= (1U << (x - 1));
   9748 		CSR_WRITE(sc, WMREG_EECD, reg);
   9749 		CSR_WRITE_FLUSH(sc);
   9750 		delay(2);
   9751 	}
   9752 	*valp = val;
   9753 }
   9754 
   9755 /* Microwire */
   9756 
   9757 /*
   9758  * wm_nvm_read_uwire:
   9759  *
   9760  *	Read a word from the EEPROM using the MicroWire protocol.
   9761  */
   9762 static int
   9763 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9764 {
   9765 	uint32_t reg, val;
   9766 	int i;
   9767 
   9768 	for (i = 0; i < wordcnt; i++) {
   9769 		/* Clear SK and DI. */
   9770 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9771 		CSR_WRITE(sc, WMREG_EECD, reg);
   9772 
   9773 		/*
   9774 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9775 		 * and Xen.
   9776 		 *
   9777 		 * We use this workaround only for 82540 because qemu's
   9778 		 * e1000 act as 82540.
   9779 		 */
   9780 		if (sc->sc_type == WM_T_82540) {
   9781 			reg |= EECD_SK;
   9782 			CSR_WRITE(sc, WMREG_EECD, reg);
   9783 			reg &= ~EECD_SK;
   9784 			CSR_WRITE(sc, WMREG_EECD, reg);
   9785 			CSR_WRITE_FLUSH(sc);
   9786 			delay(2);
   9787 		}
   9788 		/* XXX: end of workaround */
   9789 
   9790 		/* Set CHIP SELECT. */
   9791 		reg |= EECD_CS;
   9792 		CSR_WRITE(sc, WMREG_EECD, reg);
   9793 		CSR_WRITE_FLUSH(sc);
   9794 		delay(2);
   9795 
   9796 		/* Shift in the READ command. */
   9797 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9798 
   9799 		/* Shift in address. */
   9800 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9801 
   9802 		/* Shift out the data. */
   9803 		wm_eeprom_recvbits(sc, &val, 16);
   9804 		data[i] = val & 0xffff;
   9805 
   9806 		/* Clear CHIP SELECT. */
   9807 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9808 		CSR_WRITE(sc, WMREG_EECD, reg);
   9809 		CSR_WRITE_FLUSH(sc);
   9810 		delay(2);
   9811 	}
   9812 
   9813 	return 0;
   9814 }
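
/*
 * Usage sketch (hypothetical; real callers go through wm_nvm_read(),
 * which picks the backend and handles locking):
 *
 *	uint16_t myea[3];
 *
 *	if (wm_nvm_read_uwire(sc, NVM_OFF_MACADDR, 3, myea) == 0) {
 *		... each myea[i] holds two bytes of the MAC address ...
 *	}
 */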
   9815 
   9816 /* SPI */
   9817 
   9818 /*
   9819  * Set SPI and FLASH related information from the EECD register.
   9820  * For 82541 and 82547, the word size is taken from EEPROM.
   9821  */
   9822 static int
   9823 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9824 {
   9825 	int size;
   9826 	uint32_t reg;
   9827 	uint16_t data;
   9828 
   9829 	reg = CSR_READ(sc, WMREG_EECD);
   9830 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9831 
   9832 	/* Read the size of NVM from EECD by default */
   9833 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9834 	switch (sc->sc_type) {
   9835 	case WM_T_82541:
   9836 	case WM_T_82541_2:
   9837 	case WM_T_82547:
   9838 	case WM_T_82547_2:
   9839 		/* Set dummy value to access EEPROM */
   9840 		sc->sc_nvm_wordsize = 64;
   9841 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9842 		reg = data;
   9843 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9844 		if (size == 0)
   9845 			size = 6; /* 64 word size */
   9846 		else
   9847 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9848 		break;
   9849 	case WM_T_80003:
   9850 	case WM_T_82571:
   9851 	case WM_T_82572:
   9852 	case WM_T_82573: /* SPI case */
   9853 	case WM_T_82574: /* SPI case */
   9854 	case WM_T_82583: /* SPI case */
   9855 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9856 		if (size > 14)
   9857 			size = 14;
   9858 		break;
   9859 	case WM_T_82575:
   9860 	case WM_T_82576:
   9861 	case WM_T_82580:
   9862 	case WM_T_I350:
   9863 	case WM_T_I354:
   9864 	case WM_T_I210:
   9865 	case WM_T_I211:
   9866 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9867 		if (size > 15)
   9868 			size = 15;
   9869 		break;
   9870 	default:
   9871 		aprint_error_dev(sc->sc_dev,
   9872 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   9873 		return -1;
   9875 	}
   9876 
   9877 	sc->sc_nvm_wordsize = 1 << size;
   9878 
   9879 	return 0;
   9880 }
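
/*
 * Worked example, assuming NVM_WORD_SIZE_BASE_SHIFT is 6 (consistent
 * with the 64-word minimum above): an 82572 whose EECD size field
 * reads 2 gets size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256
 * 16-bit words.
 */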
   9881 
   9882 /*
   9883  * wm_nvm_ready_spi:
   9884  *
   9885  *	Wait for a SPI EEPROM to be ready for commands.
   9886  */
   9887 static int
   9888 wm_nvm_ready_spi(struct wm_softc *sc)
   9889 {
   9890 	uint32_t val;
   9891 	int usec;
   9892 
   9893 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   9894 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   9895 		wm_eeprom_recvbits(sc, &val, 8);
   9896 		if ((val & SPI_SR_RDY) == 0)
   9897 			break;
   9898 	}
   9899 	if (usec >= SPI_MAX_RETRIES) {
   9900 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   9901 		return 1;
   9902 	}
   9903 	return 0;
   9904 }
   9905 
   9906 /*
   9907  * wm_nvm_read_spi:
   9908  *
    9909  *	Read a word from the EEPROM using the SPI protocol.
   9910  */
   9911 static int
   9912 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9913 {
   9914 	uint32_t reg, val;
   9915 	int i;
   9916 	uint8_t opc;
   9917 
   9918 	/* Clear SK and CS. */
   9919 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   9920 	CSR_WRITE(sc, WMREG_EECD, reg);
   9921 	CSR_WRITE_FLUSH(sc);
   9922 	delay(2);
   9923 
   9924 	if (wm_nvm_ready_spi(sc))
   9925 		return 1;
   9926 
   9927 	/* Toggle CS to flush commands. */
   9928 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   9929 	CSR_WRITE_FLUSH(sc);
   9930 	delay(2);
   9931 	CSR_WRITE(sc, WMREG_EECD, reg);
   9932 	CSR_WRITE_FLUSH(sc);
   9933 	delay(2);
   9934 
   9935 	opc = SPI_OPC_READ;
   9936 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   9937 		opc |= SPI_OPC_A8;
   9938 
   9939 	wm_eeprom_sendbits(sc, opc, 8);
   9940 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   9941 
   9942 	for (i = 0; i < wordcnt; i++) {
   9943 		wm_eeprom_recvbits(sc, &val, 16);
   9944 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   9945 	}
   9946 
   9947 	/* Raise CS and clear SK. */
   9948 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   9949 	CSR_WRITE(sc, WMREG_EECD, reg);
   9950 	CSR_WRITE_FLUSH(sc);
   9951 	delay(2);
   9952 
   9953 	return 0;
   9954 }
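
/*
 * Note on the "word << 1" above: SPI parts are byte addressed, so the
 * 16-bit word index is doubled before it is clocked out.  A sketch for
 * word 5 of an 8-bit-address part:
 *
 *	wm_eeprom_sendbits(sc, SPI_OPC_READ, 8);
 *	wm_eeprom_sendbits(sc, 5 << 1, 8);	(byte address 10)
 */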
   9955 
    9956 /* Reading with the EERD register */
   9957 
   9958 static int
   9959 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   9960 {
   9961 	uint32_t attempts = 100000;
   9962 	uint32_t i, reg = 0;
   9963 	int32_t done = -1;
   9964 
   9965 	for (i = 0; i < attempts; i++) {
   9966 		reg = CSR_READ(sc, rw);
   9967 
   9968 		if (reg & EERD_DONE) {
   9969 			done = 0;
   9970 			break;
   9971 		}
   9972 		delay(5);
   9973 	}
   9974 
   9975 	return done;
   9976 }
   9977 
   9978 static int
   9979 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   9980     uint16_t *data)
   9981 {
   9982 	int i, eerd = 0;
   9983 	int error = 0;
   9984 
   9985 	for (i = 0; i < wordcnt; i++) {
   9986 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   9987 
   9988 		CSR_WRITE(sc, WMREG_EERD, eerd);
   9989 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   9990 		if (error != 0)
   9991 			break;
   9992 
   9993 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   9994 	}
   9995 
   9996 	return error;
   9997 }
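
/*
 * Minimal sketch of the EERD handshake performed above, assuming the
 * layout implied by the macros (address in the EERD_ADDR field,
 * EERD_START to kick the read, EERD_DONE when the data is valid):
 *
 *	CSR_WRITE(sc, WMREG_EERD, (word << EERD_ADDR_SHIFT) | EERD_START);
 *	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
 *		delay(5);
 *	data = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 */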
   9998 
   9999 /* Flash */
   10000 
   10001 static int
   10002 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10003 {
   10004 	uint32_t eecd;
   10005 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10006 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10007 	uint8_t sig_byte = 0;
   10008 
   10009 	switch (sc->sc_type) {
   10010 	case WM_T_ICH8:
   10011 	case WM_T_ICH9:
   10012 		eecd = CSR_READ(sc, WMREG_EECD);
   10013 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10014 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10015 			return 0;
   10016 		}
   10017 		/* FALLTHROUGH */
   10018 	default:
   10019 		/* Default to 0 */
   10020 		*bank = 0;
   10021 
   10022 		/* Check bank 0 */
   10023 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10024 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10025 			*bank = 0;
   10026 			return 0;
   10027 		}
   10028 
   10029 		/* Check bank 1 */
   10030 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10031 		    &sig_byte);
   10032 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10033 			*bank = 1;
   10034 			return 0;
   10035 		}
   10036 	}
   10037 
   10038 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10039 		device_xname(sc->sc_dev)));
   10040 	return -1;
   10041 }
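
/*
 * The signature probe above reads the high byte of word
 * ICH_NVM_SIG_WORD in each bank (hence the "* 2 + 1" byte offset); a
 * bank is accepted when that byte, masked with ICH_NVM_VALID_SIG_MASK,
 * equals ICH_NVM_SIG_VALUE.  E.g. for bank 1 the probe is (sketch):
 *
 *	wm_read_ich8_byte(sc,
 *	    ICH_NVM_SIG_WORD * 2 + 1 + bank1_offset, &sig_byte);
 */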
   10042 
   10043 /******************************************************************************
   10044  * This function does initial flash setup so that a new read/write/erase cycle
   10045  * can be started.
   10046  *
   10047  * sc - The pointer to the hw structure
   10048  ****************************************************************************/
   10049 static int32_t
   10050 wm_ich8_cycle_init(struct wm_softc *sc)
   10051 {
   10052 	uint16_t hsfsts;
   10053 	int32_t error = 1;
   10054 	int32_t i     = 0;
   10055 
   10056 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10057 
    10058 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   10059 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10060 		return error;
   10061 	}
   10062 
   10063 	/* Clear FCERR in Hw status by writing 1 */
   10064 	/* Clear DAEL in Hw status by writing a 1 */
   10065 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10066 
   10067 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10068 
    10069 	/*
    10070 	 * To start a new cycle we need either a hardware
    10071 	 * cycle-in-progress bit to check against, or the FDONE bit,
    10072 	 * which the hardware sets after reset so that it can be used
    10073 	 * to tell whether a cycle is in progress or has completed.
    10074 	 * We should also have a software semaphore mechanism guarding
    10075 	 * FDONE or the cycle-in-progress bit, so that accesses to
    10076 	 * those bits by two threads are serialized and two threads
    10077 	 * don't start a cycle at the same time.
    10078 	 */
   10079 
   10080 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10081 		/*
   10082 		 * There is no cycle running at present, so we can start a
   10083 		 * cycle
   10084 		 */
   10085 
   10086 		/* Begin by setting Flash Cycle Done. */
   10087 		hsfsts |= HSFSTS_DONE;
   10088 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10089 		error = 0;
   10090 	} else {
   10091 		/*
    10092 		 * Otherwise poll for some time so the current cycle has a
   10093 		 * chance to end before giving up.
   10094 		 */
   10095 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10096 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10097 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10098 				error = 0;
   10099 				break;
   10100 			}
   10101 			delay(1);
   10102 		}
   10103 		if (error == 0) {
   10104 			/*
    10105 			 * The previous cycle ended within the timeout;
    10106 			 * now set the Flash Cycle Done bit.
   10107 			 */
   10108 			hsfsts |= HSFSTS_DONE;
   10109 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10110 		}
   10111 	}
   10112 	return error;
   10113 }
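
/*
 * Note on the error-clearing write above: FCERR and DAEL in HSFSTS
 * are (as on other ICH flash controllers) write-1-to-clear bits, so
 * OR-ing them into the value written back clears any stale error
 * state instead of setting it.
 */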
   10114 
   10115 /******************************************************************************
   10116  * This function starts a flash cycle and waits for its completion
   10117  *
   10118  * sc - The pointer to the hw structure
   10119  ****************************************************************************/
   10120 static int32_t
   10121 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10122 {
   10123 	uint16_t hsflctl;
   10124 	uint16_t hsfsts;
   10125 	int32_t error = 1;
   10126 	uint32_t i = 0;
   10127 
   10128 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10129 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10130 	hsflctl |= HSFCTL_GO;
   10131 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10132 
   10133 	/* Wait till FDONE bit is set to 1 */
   10134 	do {
   10135 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10136 		if (hsfsts & HSFSTS_DONE)
   10137 			break;
   10138 		delay(1);
   10139 		i++;
   10140 	} while (i < timeout);
    10141 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   10142 		error = 0;
   10143 
   10144 	return error;
   10145 }
   10146 
   10147 /******************************************************************************
   10148  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   10149  *
   10150  * sc - The pointer to the hw structure
   10151  * index - The index of the byte or word to read.
   10152  * size - Size of data to read, 1=byte 2=word
   10153  * data - Pointer to the word to store the value read.
   10154  *****************************************************************************/
   10155 static int32_t
   10156 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10157     uint32_t size, uint16_t *data)
   10158 {
   10159 	uint16_t hsfsts;
   10160 	uint16_t hsflctl;
   10161 	uint32_t flash_linear_address;
   10162 	uint32_t flash_data = 0;
   10163 	int32_t error = 1;
   10164 	int32_t count = 0;
   10165 
    10166 	if (size < 1 || size > 2 || data == NULL ||
   10167 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10168 		return error;
   10169 
   10170 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10171 	    sc->sc_ich8_flash_base;
   10172 
   10173 	do {
   10174 		delay(1);
   10175 		/* Steps */
   10176 		error = wm_ich8_cycle_init(sc);
   10177 		if (error)
   10178 			break;
   10179 
   10180 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10181 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   10182 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10183 		    & HSFCTL_BCOUNT_MASK;
   10184 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10185 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10186 
   10187 		/*
   10188 		 * Write the last 24 bits of index into Flash Linear address
   10189 		 * field in Flash Address
   10190 		 */
    10191 		/* TODO: check the index against the size of the flash */
   10192 
   10193 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10194 
   10195 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10196 
    10197 		/*
    10198 		 * If FCERR is set, clear it and retry the whole sequence
    10199 		 * a few more times; otherwise read the result out (shift
    10200 		 * it in) from Flash Data0.  The data is returned least
    10201 		 * significant byte first.
    10202 		 */
   10203 		if (error == 0) {
   10204 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10205 			if (size == 1)
   10206 				*data = (uint8_t)(flash_data & 0x000000FF);
   10207 			else if (size == 2)
   10208 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10209 			break;
   10210 		} else {
   10211 			/*
   10212 			 * If we've gotten here, then things are probably
   10213 			 * completely hosed, but if the error condition is
   10214 			 * detected, it won't hurt to give it another try...
   10215 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10216 			 */
   10217 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10218 			if (hsfsts & HSFSTS_ERR) {
   10219 				/* Repeat for some time before giving up. */
   10220 				continue;
   10221 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10222 				break;
   10223 		}
   10224 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10225 
   10226 	return error;
   10227 }
   10228 
   10229 /******************************************************************************
   10230  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10231  *
   10232  * sc - pointer to wm_hw structure
   10233  * index - The index of the byte to read.
   10234  * data - Pointer to a byte to store the value read.
   10235  *****************************************************************************/
   10236 static int32_t
   10237 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10238 {
   10239 	int32_t status;
   10240 	uint16_t word = 0;
   10241 
   10242 	status = wm_read_ich8_data(sc, index, 1, &word);
   10243 	if (status == 0)
   10244 		*data = (uint8_t)word;
   10245 	else
   10246 		*data = 0;
   10247 
   10248 	return status;
   10249 }
   10250 
   10251 /******************************************************************************
   10252  * Reads a word from the NVM using the ICH8 flash access registers.
   10253  *
   10254  * sc - pointer to wm_hw structure
   10255  * index - The starting byte index of the word to read.
   10256  * data - Pointer to a word to store the value read.
   10257  *****************************************************************************/
   10258 static int32_t
   10259 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10260 {
   10261 	int32_t status;
   10262 
   10263 	status = wm_read_ich8_data(sc, index, 2, data);
   10264 	return status;
   10265 }
   10266 
   10267 /******************************************************************************
   10268  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10269  * register.
   10270  *
   10271  * sc - Struct containing variables accessed by shared code
   10272  * offset - offset of word in the EEPROM to read
   10273  * data - word read from the EEPROM
   10274  * words - number of words to read
   10275  *****************************************************************************/
   10276 static int
   10277 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10278 {
   10279 	int32_t  error = 0;
   10280 	uint32_t flash_bank = 0;
   10281 	uint32_t act_offset = 0;
   10282 	uint32_t bank_offset = 0;
   10283 	uint16_t word = 0;
   10284 	uint16_t i = 0;
   10285 
   10286 	/*
   10287 	 * We need to know which is the valid flash bank.  In the event
   10288 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10289 	 * managing flash_bank.  So it cannot be trusted and needs
   10290 	 * to be updated with each read.
   10291 	 */
   10292 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10293 	if (error) {
   10294 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10295 			device_xname(sc->sc_dev)));
   10296 		flash_bank = 0;
   10297 	}
   10298 
   10299 	/*
   10300 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10301 	 * size
   10302 	 */
   10303 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10304 
   10305 	error = wm_get_swfwhw_semaphore(sc);
   10306 	if (error) {
   10307 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10308 		    __func__);
   10309 		return error;
   10310 	}
   10311 
   10312 	for (i = 0; i < words; i++) {
   10313 		/* The NVM part needs a byte offset, hence * 2 */
   10314 		act_offset = bank_offset + ((offset + i) * 2);
   10315 		error = wm_read_ich8_word(sc, act_offset, &word);
   10316 		if (error) {
   10317 			aprint_error_dev(sc->sc_dev,
   10318 			    "%s: failed to read NVM\n", __func__);
   10319 			break;
   10320 		}
   10321 		data[i] = word;
   10322 	}
   10323 
   10324 	wm_put_swfwhw_semaphore(sc);
   10325 	return error;
   10326 }
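
/*
 * Offset arithmetic example (hypothetical numbers): with an
 * sc_ich8_flash_bank_size of 0x1000 words and flash_bank == 1,
 * reading NVM word 0x10 touches byte offset
 * 1 * (0x1000 * 2) + (0x10 * 2) = 0x2020 within the flash region.
 */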
   10327 
   10328 /* iNVM */
   10329 
   10330 static int
   10331 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10332 {
    10333 	int32_t  rv = -1;	/* "not found" until proven otherwise */
   10334 	uint32_t invm_dword;
   10335 	uint16_t i;
   10336 	uint8_t record_type, word_address;
   10337 
   10338 	for (i = 0; i < INVM_SIZE; i++) {
   10339 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10340 		/* Get record type */
   10341 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10342 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10343 			break;
   10344 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10345 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10346 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10347 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10348 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10349 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10350 			if (word_address == address) {
   10351 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10352 				rv = 0;
   10353 				break;
   10354 			}
   10355 		}
   10356 	}
   10357 
   10358 	return rv;
   10359 }
   10360 
   10361 static int
   10362 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10363 {
   10364 	int rv = 0;
   10365 	int i;
   10366 
   10367 	for (i = 0; i < words; i++) {
   10368 		switch (offset + i) {
   10369 		case NVM_OFF_MACADDR:
   10370 		case NVM_OFF_MACADDR1:
   10371 		case NVM_OFF_MACADDR2:
   10372 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10373 			if (rv != 0) {
   10374 				data[i] = 0xffff;
   10375 				rv = -1;
   10376 			}
   10377 			break;
   10378 		case NVM_OFF_CFG2:
   10379 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10380 			if (rv != 0) {
   10381 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10382 				rv = 0;
   10383 			}
   10384 			break;
   10385 		case NVM_OFF_CFG4:
   10386 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10387 			if (rv != 0) {
   10388 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10389 				rv = 0;
   10390 			}
   10391 			break;
   10392 		case NVM_OFF_LED_1_CFG:
   10393 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10394 			if (rv != 0) {
   10395 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10396 				rv = 0;
   10397 			}
   10398 			break;
   10399 		case NVM_OFF_LED_0_2_CFG:
   10400 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10401 			if (rv != 0) {
   10402 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10403 				rv = 0;
   10404 			}
   10405 			break;
   10406 		case NVM_OFF_ID_LED_SETTINGS:
   10407 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10408 			if (rv != 0) {
   10409 				*data = ID_LED_RESERVED_FFFF;
   10410 				rv = 0;
   10411 			}
   10412 			break;
   10413 		default:
   10414 			DPRINTF(WM_DEBUG_NVM,
   10415 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10416 			*data = NVM_RESERVED_WORD;
   10417 			break;
   10418 		}
   10419 	}
   10420 
   10421 	return rv;
   10422 }
   10423 
   10424 /* Lock, detecting NVM type, validate checksum, version and read */
   10425 
   10426 /*
   10427  * wm_nvm_acquire:
   10428  *
   10429  *	Perform the EEPROM handshake required on some chips.
   10430  */
   10431 static int
   10432 wm_nvm_acquire(struct wm_softc *sc)
   10433 {
   10434 	uint32_t reg;
   10435 	int x;
   10436 	int ret = 0;
   10437 
    10438 	/* Always succeeds; flash-type NVM needs no handshake */
   10439 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10440 		return 0;
   10441 
   10442 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10443 		ret = wm_get_swfwhw_semaphore(sc);
   10444 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10445 		/* This will also do wm_get_swsm_semaphore() if needed */
   10446 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10447 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10448 		ret = wm_get_swsm_semaphore(sc);
   10449 	}
   10450 
   10451 	if (ret) {
   10452 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10453 			__func__);
   10454 		return 1;
   10455 	}
   10456 
   10457 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10458 		reg = CSR_READ(sc, WMREG_EECD);
   10459 
   10460 		/* Request EEPROM access. */
   10461 		reg |= EECD_EE_REQ;
   10462 		CSR_WRITE(sc, WMREG_EECD, reg);
   10463 
   10464 		/* ..and wait for it to be granted. */
   10465 		for (x = 0; x < 1000; x++) {
   10466 			reg = CSR_READ(sc, WMREG_EECD);
   10467 			if (reg & EECD_EE_GNT)
   10468 				break;
   10469 			delay(5);
   10470 		}
   10471 		if ((reg & EECD_EE_GNT) == 0) {
   10472 			aprint_error_dev(sc->sc_dev,
   10473 			    "could not acquire EEPROM GNT\n");
   10474 			reg &= ~EECD_EE_REQ;
   10475 			CSR_WRITE(sc, WMREG_EECD, reg);
   10476 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10477 				wm_put_swfwhw_semaphore(sc);
   10478 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10479 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10480 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10481 				wm_put_swsm_semaphore(sc);
   10482 			return 1;
   10483 		}
   10484 	}
   10485 
   10486 	return 0;
   10487 }
   10488 
   10489 /*
   10490  * wm_nvm_release:
   10491  *
   10492  *	Release the EEPROM mutex.
   10493  */
   10494 static void
   10495 wm_nvm_release(struct wm_softc *sc)
   10496 {
   10497 	uint32_t reg;
   10498 
    10499 	/* Nothing to release; flash-type NVM needs no handshake */
   10500 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10501 		return;
   10502 
   10503 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10504 		reg = CSR_READ(sc, WMREG_EECD);
   10505 		reg &= ~EECD_EE_REQ;
   10506 		CSR_WRITE(sc, WMREG_EECD, reg);
   10507 	}
   10508 
   10509 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10510 		wm_put_swfwhw_semaphore(sc);
   10511 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10512 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10513 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10514 		wm_put_swsm_semaphore(sc);
   10515 }
   10516 
   10517 static int
   10518 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10519 {
   10520 	uint32_t eecd = 0;
   10521 
   10522 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10523 	    || sc->sc_type == WM_T_82583) {
   10524 		eecd = CSR_READ(sc, WMREG_EECD);
   10525 
   10526 		/* Isolate bits 15 & 16 */
   10527 		eecd = ((eecd >> 15) & 0x03);
   10528 
   10529 		/* If both bits are set, device is Flash type */
   10530 		if (eecd == 0x03)
   10531 			return 0;
   10532 	}
   10533 	return 1;
   10534 }
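
/*
 * Bit extraction example: with EECD = 0x00018000, ((eecd >> 15) & 0x03)
 * yields 0x3, i.e. bits 15 and 16 are both set, so the part reports
 * Flash rather than an on-board EEPROM and this function returns 0.
 */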
   10535 
   10536 static int
   10537 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10538 {
   10539 	uint32_t eec;
   10540 
   10541 	eec = CSR_READ(sc, WMREG_EEC);
   10542 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10543 		return 1;
   10544 
   10545 	return 0;
   10546 }
   10547 
   10548 /*
   10549  * wm_nvm_validate_checksum
   10550  *
    10551  * The checksum is defined as the sum of the first 64 16-bit words.
   10552  */
   10553 static int
   10554 wm_nvm_validate_checksum(struct wm_softc *sc)
   10555 {
   10556 	uint16_t checksum;
   10557 	uint16_t eeprom_data;
   10558 #ifdef WM_DEBUG
   10559 	uint16_t csum_wordaddr, valid_checksum;
   10560 #endif
   10561 	int i;
   10562 
   10563 	checksum = 0;
   10564 
   10565 	/* Don't check for I211 */
   10566 	if (sc->sc_type == WM_T_I211)
   10567 		return 0;
   10568 
   10569 #ifdef WM_DEBUG
   10570 	if (sc->sc_type == WM_T_PCH_LPT) {
   10571 		csum_wordaddr = NVM_OFF_COMPAT;
   10572 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10573 	} else {
   10574 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10575 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10576 	}
   10577 
   10578 	/* Dump EEPROM image for debug */
   10579 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10580 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10581 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10582 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10583 		if ((eeprom_data & valid_checksum) == 0) {
   10584 			DPRINTF(WM_DEBUG_NVM,
   10585 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10586 				device_xname(sc->sc_dev), eeprom_data,
   10587 				    valid_checksum));
   10588 		}
   10589 	}
   10590 
   10591 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10592 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10593 		for (i = 0; i < NVM_SIZE; i++) {
   10594 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10595 				printf("XXXX ");
   10596 			else
   10597 				printf("%04hx ", eeprom_data);
   10598 			if (i % 8 == 7)
   10599 				printf("\n");
   10600 		}
   10601 	}
   10602 
   10603 #endif /* WM_DEBUG */
   10604 
   10605 	for (i = 0; i < NVM_SIZE; i++) {
   10606 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10607 			return 1;
   10608 		checksum += eeprom_data;
   10609 	}
   10610 
   10611 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10612 #ifdef WM_DEBUG
   10613 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10614 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10615 #endif
   10616 	}
   10617 
   10618 	return 0;
   10619 }
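
/*
 * Checksum arithmetic sketch (assuming the usual NVM_CHECKSUM value of
 * 0xbaba): the checksum word at offset 0x3f is chosen by the NVM
 * programming tool so that
 *
 *	(uint16_t)(word[0x00] + word[0x01] + ... + word[0x3f]) == 0xbaba
 *
 * which is exactly what the summation loop above verifies.
 */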
   10620 
   10621 static void
   10622 wm_nvm_version_invm(struct wm_softc *sc)
   10623 {
   10624 	uint32_t dword;
   10625 
   10626 	/*
    10627 	 * Linux's code to decode the version is very strange, so we
    10628 	 * don't follow that algorithm and just use word 61 as the
    10629 	 * documentation describes.  Perhaps it's not perfect though...
   10630 	 *
   10631 	 * Example:
   10632 	 *
   10633 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10634 	 */
   10635 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10636 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10637 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10638 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10639 }
   10640 
   10641 static void
   10642 wm_nvm_version(struct wm_softc *sc)
   10643 {
   10644 	uint16_t major, minor, build, patch;
   10645 	uint16_t uid0, uid1;
   10646 	uint16_t nvm_data;
   10647 	uint16_t off;
   10648 	bool check_version = false;
   10649 	bool check_optionrom = false;
   10650 	bool have_build = false;
   10651 
   10652 	/*
   10653 	 * Version format:
   10654 	 *
   10655 	 * XYYZ
   10656 	 * X0YZ
   10657 	 * X0YY
   10658 	 *
   10659 	 * Example:
   10660 	 *
   10661 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10662 	 *	82571	0x50a6	5.10.6?
   10663 	 *	82572	0x506a	5.6.10?
   10664 	 *	82572EI	0x5069	5.6.9?
   10665 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10666 	 *		0x2013	2.1.3?
    10667 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10668 	 */
   10669 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10670 	switch (sc->sc_type) {
   10671 	case WM_T_82571:
   10672 	case WM_T_82572:
   10673 	case WM_T_82574:
   10674 	case WM_T_82583:
   10675 		check_version = true;
   10676 		check_optionrom = true;
   10677 		have_build = true;
   10678 		break;
   10679 	case WM_T_82575:
   10680 	case WM_T_82576:
   10681 	case WM_T_82580:
   10682 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10683 			check_version = true;
   10684 		break;
   10685 	case WM_T_I211:
   10686 		wm_nvm_version_invm(sc);
   10687 		goto printver;
   10688 	case WM_T_I210:
   10689 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10690 			wm_nvm_version_invm(sc);
   10691 			goto printver;
   10692 		}
   10693 		/* FALLTHROUGH */
   10694 	case WM_T_I350:
   10695 	case WM_T_I354:
   10696 		check_version = true;
   10697 		check_optionrom = true;
   10698 		break;
   10699 	default:
   10700 		return;
   10701 	}
   10702 	if (check_version) {
   10703 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10704 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10705 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10706 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10707 			build = nvm_data & NVM_BUILD_MASK;
   10708 			have_build = true;
   10709 		} else
   10710 			minor = nvm_data & 0x00ff;
   10711 
   10712 		/* Decimal */
   10713 		minor = (minor / 16) * 10 + (minor % 16);
   10714 		sc->sc_nvm_ver_major = major;
   10715 		sc->sc_nvm_ver_minor = minor;
   10716 
   10717 printver:
   10718 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10719 		    sc->sc_nvm_ver_minor);
   10720 		if (have_build) {
   10721 			sc->sc_nvm_ver_build = build;
   10722 			aprint_verbose(".%d", build);
   10723 		}
   10724 	}
   10725 	if (check_optionrom) {
   10726 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10727 		/* Option ROM Version */
   10728 		if ((off != 0x0000) && (off != 0xffff)) {
   10729 			off += NVM_COMBO_VER_OFF;
   10730 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10731 			wm_nvm_read(sc, off, 1, &uid0);
   10732 			if ((uid0 != 0) && (uid0 != 0xffff)
   10733 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10734 				/* 16bits */
   10735 				major = uid0 >> 8;
   10736 				build = (uid0 << 8) | (uid1 >> 8);
   10737 				patch = uid1 & 0x00ff;
   10738 				aprint_verbose(", option ROM Version %d.%d.%d",
   10739 				    major, build, patch);
   10740 			}
   10741 		}
   10742 	}
   10743 
   10744 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10745 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10746 }
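
/*
 * Worked example for the XYYZ format above: 82571 version word 0x50a2
 * splits into major 0x5, minor 0x0a and build 0x2.  The BCD-style
 * conversion (minor / 16) * 10 + (minor % 16) turns 0x0a into 10,
 * so the printed version is 5.10.2.
 */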
   10747 
   10748 /*
   10749  * wm_nvm_read:
   10750  *
   10751  *	Read data from the serial EEPROM.
   10752  */
   10753 static int
   10754 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10755 {
   10756 	int rv;
   10757 
   10758 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10759 		return 1;
   10760 
   10761 	if (wm_nvm_acquire(sc))
   10762 		return 1;
   10763 
   10764 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10765 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10766 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   10767 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   10768 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   10769 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   10770 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   10771 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   10772 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   10773 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   10774 	else
   10775 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   10776 
   10777 	wm_nvm_release(sc);
   10778 	return rv;
   10779 }
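
/*
 * Usage sketch: all NVM consumers funnel through this dispatcher, e.g.
 *
 *	uint16_t cfg2;
 *
 *	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2) == 0) {
 *		... cfg2 is valid ...
 *	}
 *
 * so they need not know whether the backing store is Microwire, SPI,
 * EERD, iNVM or flash.
 */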
   10780 
   10781 /*
   10782  * Hardware semaphores.
    10783  * Very complex...
   10784  */
   10785 
   10786 static int
   10787 wm_get_swsm_semaphore(struct wm_softc *sc)
   10788 {
   10789 	int32_t timeout;
   10790 	uint32_t swsm;
   10791 
   10792 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10793 		/* Get the SW semaphore. */
   10794 		timeout = sc->sc_nvm_wordsize + 1;
   10795 		while (timeout) {
   10796 			swsm = CSR_READ(sc, WMREG_SWSM);
   10797 
   10798 			if ((swsm & SWSM_SMBI) == 0)
   10799 				break;
   10800 
   10801 			delay(50);
   10802 			timeout--;
   10803 		}
   10804 
   10805 		if (timeout == 0) {
   10806 			aprint_error_dev(sc->sc_dev,
   10807 			    "could not acquire SWSM SMBI\n");
   10808 			return 1;
   10809 		}
   10810 	}
   10811 
   10812 	/* Get the FW semaphore. */
   10813 	timeout = sc->sc_nvm_wordsize + 1;
   10814 	while (timeout) {
   10815 		swsm = CSR_READ(sc, WMREG_SWSM);
   10816 		swsm |= SWSM_SWESMBI;
   10817 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   10818 		/* If we managed to set the bit we got the semaphore. */
   10819 		swsm = CSR_READ(sc, WMREG_SWSM);
   10820 		if (swsm & SWSM_SWESMBI)
   10821 			break;
   10822 
   10823 		delay(50);
   10824 		timeout--;
   10825 	}
   10826 
   10827 	if (timeout == 0) {
   10828 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   10829 		/* Release semaphores */
   10830 		wm_put_swsm_semaphore(sc);
   10831 		return 1;
   10832 	}
   10833 	return 0;
   10834 }
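
/*
 * Protocol note (a sketch of the usual SWSM semantics, which this code
 * assumes): SMBI is a hardware-assisted test-and-set bit, in that a
 * read of SWSM returning SMBI == 0 atomically sets it, while SWESMBI
 * must be written and then read back to confirm ownership against the
 * firmware.  wm_put_swsm_semaphore() below clears both bits.
 */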
   10835 
   10836 static void
   10837 wm_put_swsm_semaphore(struct wm_softc *sc)
   10838 {
   10839 	uint32_t swsm;
   10840 
   10841 	swsm = CSR_READ(sc, WMREG_SWSM);
   10842 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   10843 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   10844 }
   10845 
   10846 static int
   10847 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10848 {
   10849 	uint32_t swfw_sync;
   10850 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   10851 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    10852 	int timeout;
   10853 
   10854 	for (timeout = 0; timeout < 200; timeout++) {
   10855 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10856 			if (wm_get_swsm_semaphore(sc)) {
   10857 				aprint_error_dev(sc->sc_dev,
   10858 				    "%s: failed to get semaphore\n",
   10859 				    __func__);
   10860 				return 1;
   10861 			}
   10862 		}
   10863 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10864 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   10865 			swfw_sync |= swmask;
   10866 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10867 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   10868 				wm_put_swsm_semaphore(sc);
   10869 			return 0;
   10870 		}
   10871 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   10872 			wm_put_swsm_semaphore(sc);
   10873 		delay(5000);
   10874 	}
   10875 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   10876 	    device_xname(sc->sc_dev), mask, swfw_sync);
   10877 	return 1;
   10878 }
   10879 
   10880 static void
   10881 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10882 {
   10883 	uint32_t swfw_sync;
   10884 
   10885 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10886 		while (wm_get_swsm_semaphore(sc) != 0)
   10887 			continue;
   10888 	}
   10889 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10890 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   10891 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10892 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   10893 		wm_put_swsm_semaphore(sc);
   10894 }
   10895 
   10896 static int
   10897 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   10898 {
   10899 	uint32_t ext_ctrl;
    10900 	int timeout;
   10901 
   10902 	for (timeout = 0; timeout < 200; timeout++) {
   10903 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10904 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10905 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10906 
   10907 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10908 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   10909 			return 0;
   10910 		delay(5000);
   10911 	}
   10912 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   10913 	    device_xname(sc->sc_dev), ext_ctrl);
   10914 	return 1;
   10915 }
   10916 
   10917 static void
   10918 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   10919 {
   10920 	uint32_t ext_ctrl;
   10921 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10922 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10923 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10924 }
   10925 
   10926 static int
   10927 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   10928 {
   10929 	int i = 0;
   10930 	uint32_t reg;
   10931 
   10932 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10933 	do {
   10934 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   10935 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   10936 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10937 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   10938 			break;
   10939 		delay(2*1000);
   10940 		i++;
   10941 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   10942 
   10943 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   10944 		wm_put_hw_semaphore_82573(sc);
   10945 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   10946 		    device_xname(sc->sc_dev));
   10947 		return -1;
   10948 	}
   10949 
   10950 	return 0;
   10951 }
   10952 
   10953 static void
   10954 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   10955 {
   10956 	uint32_t reg;
   10957 
   10958 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10959 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10960 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   10961 }
   10962 
   10963 /*
   10964  * Management mode and power management related subroutines.
   10965  * BMC, AMT, suspend/resume and EEE.
   10966  */
   10967 
   10968 static int
   10969 wm_check_mng_mode(struct wm_softc *sc)
   10970 {
   10971 	int rv;
   10972 
   10973 	switch (sc->sc_type) {
   10974 	case WM_T_ICH8:
   10975 	case WM_T_ICH9:
   10976 	case WM_T_ICH10:
   10977 	case WM_T_PCH:
   10978 	case WM_T_PCH2:
   10979 	case WM_T_PCH_LPT:
   10980 		rv = wm_check_mng_mode_ich8lan(sc);
   10981 		break;
   10982 	case WM_T_82574:
   10983 	case WM_T_82583:
   10984 		rv = wm_check_mng_mode_82574(sc);
   10985 		break;
   10986 	case WM_T_82571:
   10987 	case WM_T_82572:
   10988 	case WM_T_82573:
   10989 	case WM_T_80003:
   10990 		rv = wm_check_mng_mode_generic(sc);
   10991 		break;
   10992 	default:
    10993 		/* nothing to do */
   10994 		rv = 0;
   10995 		break;
   10996 	}
   10997 
   10998 	return rv;
   10999 }
   11000 
   11001 static int
   11002 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11003 {
   11004 	uint32_t fwsm;
   11005 
   11006 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11007 
   11008 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   11009 		return 1;
   11010 
   11011 	return 0;
   11012 }
   11013 
   11014 static int
   11015 wm_check_mng_mode_82574(struct wm_softc *sc)
   11016 {
   11017 	uint16_t data;
   11018 
   11019 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11020 
   11021 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11022 		return 1;
   11023 
   11024 	return 0;
   11025 }
   11026 
   11027 static int
   11028 wm_check_mng_mode_generic(struct wm_softc *sc)
   11029 {
   11030 	uint32_t fwsm;
   11031 
   11032 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11033 
   11034 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   11035 		return 1;
   11036 
   11037 	return 0;
   11038 }
   11039 
   11040 static int
   11041 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11042 {
   11043 	uint32_t manc, fwsm, factps;
   11044 
   11045 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11046 		return 0;
   11047 
   11048 	manc = CSR_READ(sc, WMREG_MANC);
   11049 
   11050 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11051 		device_xname(sc->sc_dev), manc));
   11052 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11053 		return 0;
   11054 
   11055 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11056 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11057 		factps = CSR_READ(sc, WMREG_FACTPS);
   11058 		if (((factps & FACTPS_MNGCG) == 0)
   11059 		    && ((fwsm & FWSM_MODE_MASK)
   11060 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   11061 			return 1;
    11062 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11063 		uint16_t data;
   11064 
   11065 		factps = CSR_READ(sc, WMREG_FACTPS);
   11066 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11067 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11068 			device_xname(sc->sc_dev), factps, data));
   11069 		if (((factps & FACTPS_MNGCG) == 0)
   11070 		    && ((data & NVM_CFG2_MNGM_MASK)
   11071 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11072 			return 1;
   11073 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11074 	    && ((manc & MANC_ASF_EN) == 0))
   11075 		return 1;
   11076 
   11077 	return 0;
   11078 }
   11079 
   11080 static int
   11081 wm_check_reset_block(struct wm_softc *sc)
   11082 {
   11083 	uint32_t reg;
   11084 
   11085 	switch (sc->sc_type) {
   11086 	case WM_T_ICH8:
   11087 	case WM_T_ICH9:
   11088 	case WM_T_ICH10:
   11089 	case WM_T_PCH:
   11090 	case WM_T_PCH2:
   11091 	case WM_T_PCH_LPT:
   11092 		reg = CSR_READ(sc, WMREG_FWSM);
   11093 		if ((reg & FWSM_RSPCIPHY) != 0)
   11094 			return 0;
   11095 		else
   11096 			return -1;
   11098 	case WM_T_82571:
   11099 	case WM_T_82572:
   11100 	case WM_T_82573:
   11101 	case WM_T_82574:
   11102 	case WM_T_82583:
   11103 	case WM_T_80003:
   11104 		reg = CSR_READ(sc, WMREG_MANC);
   11105 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11106 			return -1;
   11107 		else
   11108 			return 0;
   11110 	default:
   11111 		/* no problem */
   11112 		break;
   11113 	}
   11114 
   11115 	return 0;
   11116 }
   11117 
   11118 static void
   11119 wm_get_hw_control(struct wm_softc *sc)
   11120 {
   11121 	uint32_t reg;
   11122 
   11123 	switch (sc->sc_type) {
   11124 	case WM_T_82573:
   11125 		reg = CSR_READ(sc, WMREG_SWSM);
   11126 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11127 		break;
   11128 	case WM_T_82571:
   11129 	case WM_T_82572:
   11130 	case WM_T_82574:
   11131 	case WM_T_82583:
   11132 	case WM_T_80003:
   11133 	case WM_T_ICH8:
   11134 	case WM_T_ICH9:
   11135 	case WM_T_ICH10:
   11136 	case WM_T_PCH:
   11137 	case WM_T_PCH2:
   11138 	case WM_T_PCH_LPT:
   11139 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11140 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11141 		break;
   11142 	default:
   11143 		break;
   11144 	}
   11145 }
   11146 
   11147 static void
   11148 wm_release_hw_control(struct wm_softc *sc)
   11149 {
   11150 	uint32_t reg;
   11151 
   11152 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11153 		return;
   11154 
   11155 	if (sc->sc_type == WM_T_82573) {
   11156 		reg = CSR_READ(sc, WMREG_SWSM);
   11157 		reg &= ~SWSM_DRV_LOAD;
    11158 		CSR_WRITE(sc, WMREG_SWSM, reg);
   11159 	} else {
   11160 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11161 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11162 	}
   11163 }
   11164 
   11165 static void
   11166 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   11167 {
   11168 	uint32_t reg;
   11169 
   11170 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11171 
   11172 	if (on != 0)
   11173 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11174 	else
   11175 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11176 
   11177 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11178 }
   11179 
   11180 static void
   11181 wm_smbustopci(struct wm_softc *sc)
   11182 {
   11183 	uint32_t fwsm;
   11184 
   11185 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11186 	if (((fwsm & FWSM_FW_VALID) == 0)
   11187 	    && ((wm_check_reset_block(sc) == 0))) {
   11188 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11189 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11190 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11191 		CSR_WRITE_FLUSH(sc);
   11192 		delay(10);
   11193 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11194 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11195 		CSR_WRITE_FLUSH(sc);
   11196 		delay(50*1000);
   11197 
   11198 		/*
   11199 		 * Gate automatic PHY configuration by hardware on non-managed
   11200 		 * 82579
   11201 		 */
   11202 		if (sc->sc_type == WM_T_PCH2)
   11203 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   11204 	}
   11205 }
   11206 
   11207 static void
   11208 wm_init_manageability(struct wm_softc *sc)
   11209 {
   11210 
   11211 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11212 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11213 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11214 
   11215 		/* Disable hardware interception of ARP */
   11216 		manc &= ~MANC_ARP_EN;
   11217 
   11218 		/* Enable receiving management packets to the host */
   11219 		if (sc->sc_type >= WM_T_82571) {
   11220 			manc |= MANC_EN_MNG2HOST;
    11221 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11222 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11223 		}
   11224 
   11225 		CSR_WRITE(sc, WMREG_MANC, manc);
   11226 	}
   11227 }
   11228 
   11229 static void
   11230 wm_release_manageability(struct wm_softc *sc)
   11231 {
   11232 
   11233 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11234 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11235 
   11236 		manc |= MANC_ARP_EN;
   11237 		if (sc->sc_type >= WM_T_82571)
   11238 			manc &= ~MANC_EN_MNG2HOST;
   11239 
   11240 		CSR_WRITE(sc, WMREG_MANC, manc);
   11241 	}
   11242 }
   11243 
   11244 static void
   11245 wm_get_wakeup(struct wm_softc *sc)
   11246 {
   11247 
   11248 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11249 	switch (sc->sc_type) {
   11250 	case WM_T_82573:
   11251 	case WM_T_82583:
   11252 		sc->sc_flags |= WM_F_HAS_AMT;
   11253 		/* FALLTHROUGH */
   11254 	case WM_T_80003:
   11255 	case WM_T_82541:
   11256 	case WM_T_82547:
   11257 	case WM_T_82571:
   11258 	case WM_T_82572:
   11259 	case WM_T_82574:
   11260 	case WM_T_82575:
   11261 	case WM_T_82576:
   11262 	case WM_T_82580:
   11263 	case WM_T_I350:
   11264 	case WM_T_I354:
   11265 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   11266 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11267 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11268 		break;
   11269 	case WM_T_ICH8:
   11270 	case WM_T_ICH9:
   11271 	case WM_T_ICH10:
   11272 	case WM_T_PCH:
   11273 	case WM_T_PCH2:
   11274 	case WM_T_PCH_LPT:
   11275 		sc->sc_flags |= WM_F_HAS_AMT;
   11276 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11277 		break;
   11278 	default:
   11279 		break;
   11280 	}
   11281 
   11282 	/* 1: HAS_MANAGE */
   11283 	if (wm_enable_mng_pass_thru(sc) != 0)
   11284 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11285 
   11286 #ifdef WM_DEBUG
   11287 	printf("\n");
   11288 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11289 		printf("HAS_AMT,");
   11290 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11291 		printf("ARC_SUBSYS_VALID,");
   11292 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11293 		printf("ASF_FIRMWARE_PRES,");
   11294 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11295 		printf("HAS_MANAGE,");
   11296 	printf("\n");
   11297 #endif
   11298 	/*
    11299 	 * Note that the WOL flags are set later, after the EEPROM
    11300 	 * stuff has been reset.
   11301 	 */
   11302 }
   11303 
   11304 #ifdef WM_WOL
   11305 /* WOL in the newer chipset interfaces (pchlan) */
   11306 static void
   11307 wm_enable_phy_wakeup(struct wm_softc *sc)
   11308 {
   11309 #if 0
   11310 	uint16_t preg;
   11311 
   11312 	/* Copy MAC RARs to PHY RARs */
   11313 
   11314 	/* Copy MAC MTA to PHY MTA */
   11315 
   11316 	/* Configure PHY Rx Control register */
   11317 
   11318 	/* Enable PHY wakeup in MAC register */
   11319 
   11320 	/* Configure and enable PHY wakeup in PHY registers */
   11321 
   11322 	/* Activate PHY wakeup */
   11323 
   11324 	/* XXX */
   11325 #endif
   11326 }
   11327 
   11328 /* Power down workaround on D3 */
   11329 static void
   11330 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11331 {
   11332 	uint32_t reg;
   11333 	int i;
   11334 
   11335 	for (i = 0; i < 2; i++) {
   11336 		/* Disable link */
   11337 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11338 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11339 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11340 
   11341 		/*
   11342 		 * Call gig speed drop workaround on Gig disable before
   11343 		 * accessing any PHY registers
   11344 		 */
   11345 		if (sc->sc_type == WM_T_ICH8)
   11346 			wm_gig_downshift_workaround_ich8lan(sc);
   11347 
   11348 		/* Write VR power-down enable */
   11349 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11350 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11351 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11352 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11353 
   11354 		/* Read it back and test */
   11355 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11356 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11357 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11358 			break;
   11359 
   11360 		/* Issue PHY reset and repeat at most one more time */
   11361 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11362 	}
   11363 }
   11364 
   11365 static void
   11366 wm_enable_wakeup(struct wm_softc *sc)
   11367 {
   11368 	uint32_t reg, pmreg;
   11369 	pcireg_t pmode;
   11370 
   11371 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11372 		&pmreg, NULL) == 0)
   11373 		return;
   11374 
   11375 	/* Advertise the wakeup capability */
   11376 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11377 	    | CTRL_SWDPIN(3));
   11378 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11379 
   11380 	/* ICH workaround */
   11381 	switch (sc->sc_type) {
   11382 	case WM_T_ICH8:
   11383 	case WM_T_ICH9:
   11384 	case WM_T_ICH10:
   11385 	case WM_T_PCH:
   11386 	case WM_T_PCH2:
   11387 	case WM_T_PCH_LPT:
   11388 		/* Disable gig during WOL */
   11389 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11390 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11391 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11392 		if (sc->sc_type == WM_T_PCH)
   11393 			wm_gmii_reset(sc);
   11394 
   11395 		/* Power down workaround */
   11396 		if (sc->sc_phytype == WMPHY_82577) {
   11397 			struct mii_softc *child;
   11398 
   11399 			/* Assume that the PHY is copper */
   11400 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11401 			if (child->mii_mpd_rev <= 2)
   11402 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11403 				    (768 << 5) | 25, 0x0444); /* magic num */
   11404 		}
   11405 		break;
   11406 	default:
   11407 		break;
   11408 	}
   11409 
   11410 	/* Keep the laser running on fiber adapters */
   11411 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11412 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11413 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11414 		reg |= CTRL_EXT_SWDPIN(3);
   11415 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11416 	}
   11417 
   11418 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11419 #if 0	/* for the multicast packet */
   11420 	reg |= WUFC_MC;
   11421 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11422 #endif
   11423 
   11424 	if (sc->sc_type == WM_T_PCH) {
   11425 		wm_enable_phy_wakeup(sc);
   11426 	} else {
   11427 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11428 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11429 	}
   11430 
   11431 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11432 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11433 		|| (sc->sc_type == WM_T_PCH2))
   11434 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11435 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11436 
   11437 	/* Request PME */
   11438 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11439 #if 0
   11440 	/* Disable WOL */
   11441 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11442 #else
   11443 	/* For WOL */
   11444 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11445 #endif
   11446 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11447 }
   11448 #endif /* WM_WOL */
   11449 
   11450 /* EEE */
   11451 
   11452 static void
   11453 wm_set_eee_i350(struct wm_softc *sc)
   11454 {
   11455 	uint32_t ipcnfg, eeer;
   11456 
   11457 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11458 	eeer = CSR_READ(sc, WMREG_EEER);
   11459 
   11460 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11461 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11462 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11463 		    | EEER_LPI_FC);
   11464 	} else {
   11465 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11466 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11467 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11468 		    | EEER_LPI_FC);
   11469 	}
   11470 
   11471 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11472 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11473 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11474 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11475 }
   11476 
   11477 /*
   11478  * Workarounds (mainly PHY related).
   11479  * Basically, PHY's workarounds are in the PHY drivers.
   11480  */
   11481 
   11482 /* Work-around for 82566 Kumeran PCS lock loss */
   11483 static void
   11484 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11485 {
   11486 	int miistatus, active, i;
   11487 	int reg;
   11488 
   11489 	miistatus = sc->sc_mii.mii_media_status;
   11490 
   11491 	/* If the link is not up, do nothing */
    11492 	if ((miistatus & IFM_ACTIVE) == 0)
   11493 		return;
   11494 
   11495 	active = sc->sc_mii.mii_media_active;
   11496 
   11497 	/* Nothing to do if the link is other than 1Gbps */
   11498 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11499 		return;
   11500 
   11501 	for (i = 0; i < 10; i++) {
   11502 		/* read twice */
   11503 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11504 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
    11505 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11506 			goto out;	/* GOOD! */
   11507 
   11508 		/* Reset the PHY */
   11509 		wm_gmii_reset(sc);
   11510 		delay(5*1000);
   11511 	}
   11512 
   11513 	/* Disable GigE link negotiation */
   11514 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11515 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11516 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11517 
   11518 	/*
   11519 	 * Call gig speed drop workaround on Gig disable before accessing
   11520 	 * any PHY registers.
   11521 	 */
   11522 	wm_gig_downshift_workaround_ich8lan(sc);
   11523 
   11524 out:
   11525 	return;
   11526 }
   11527 
   11528 /* WOL from S5 stops working */
   11529 static void
   11530 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11531 {
   11532 	uint16_t kmrn_reg;
   11533 
   11534 	/* Only for igp3 */
   11535 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11536 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11537 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11538 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11539 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11540 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11541 	}
   11542 }
   11543 
   11544 /*
   11545  * Workaround for pch's PHYs
   11546  * XXX should be moved to new PHY driver?
   11547  */
   11548 static void
   11549 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   11550 {
   11551 	if (sc->sc_phytype == WMPHY_82577)
   11552 		wm_set_mdio_slow_mode_hv(sc);
   11553 
   11554 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   11555 
   11556 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   11557 
   11558 	/* 82578 */
   11559 	if (sc->sc_phytype == WMPHY_82578) {
   11560 		/* PCH rev. < 3 */
   11561 		if (sc->sc_rev < 3) {
   11562 			/* XXX 6 bit shift? Why? Is it page2? */
   11563 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   11564 			    0x66c0);
   11565 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   11566 			    0xffff);
   11567 		}
   11568 
   11569 		/* XXX phy rev. < 2 */
   11570 	}
   11571 
   11572 	/* Select page 0 */
   11573 
   11574 	/* XXX acquire semaphore */
   11575 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   11576 	/* XXX release semaphore */
   11577 
    11578 	/*
    11579 	 * Configure the K1 Si workaround during PHY reset, assuming the
    11580 	 * link is up, so that K1 is disabled if the link runs at 1Gbps.
    11581 	 */
   11582 	wm_k1_gig_workaround_hv(sc, 1);
   11583 }
   11584 
   11585 static void
   11586 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   11587 {
   11588 
   11589 	wm_set_mdio_slow_mode_hv(sc);
   11590 }
   11591 
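/*
 * wm_k1_gig_workaround_hv:
 *
 *	K1 is a power-saving state of the Kumeran interconnect between
 *	the MAC and the PHY.  It must not be used while a 1Gb/s link is
 *	up, so force it off on link-up and otherwise fall back to the
 *	NVM default (sc_nvm_k1_enabled).
 */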
   11592 static void
   11593 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   11594 {
   11595 	int k1_enable = sc->sc_nvm_k1_enabled;
   11596 
   11597 	/* XXX acquire semaphore */
   11598 
   11599 	if (link) {
   11600 		k1_enable = 0;
   11601 
   11602 		/* Link stall fix for link up */
   11603 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   11604 	} else {
   11605 		/* Link stall fix for link down */
   11606 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   11607 	}
   11608 
   11609 	wm_configure_k1_ich8lan(sc, k1_enable);
   11610 
   11611 	/* XXX release semaphore */
   11612 }
   11613 
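/*
 * wm_set_mdio_slow_mode_hv:
 *
 *	Switch Kumeran MDIO accesses to slow mode.  Some PCH PHYs
 *	(notably the 82577) are not reliably accessible at the default
 *	MDIO timing, so this is done before other PHY workarounds.
 */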
   11614 static void
   11615 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   11616 {
   11617 	uint32_t reg;
   11618 
   11619 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   11620 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   11621 	    reg | HV_KMRN_MDIO_SLOW);
   11622 }
   11623 
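/*
 * wm_configure_k1_ich8lan:
 *
 *	Write the new K1 enable setting to the Kumeran K1 configuration
 *	register.  While the setting takes effect, the MAC's speed is
 *	pinned (CTRL_FRCSPD plus the CTRL_EXT speed bypass), presumably
 *	so the Kumeran link does not retrain mid-change; the original
 *	CTRL and CTRL_EXT values are restored afterwards.
 */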
   11624 static void
   11625 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   11626 {
   11627 	uint32_t ctrl, ctrl_ext, tmp;
   11628 	uint16_t kmrn_reg;
   11629 
   11630 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   11631 
   11632 	if (k1_enable)
   11633 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   11634 	else
   11635 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   11636 
   11637 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   11638 
   11639 	delay(20);
   11640 
   11641 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11642 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11643 
   11644 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   11645 	tmp |= CTRL_FRCSPD;
   11646 
   11647 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   11648 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   11649 	CSR_WRITE_FLUSH(sc);
   11650 	delay(20);
   11651 
   11652 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   11653 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11654 	CSR_WRITE_FLUSH(sc);
   11655 	delay(20);
   11656 }
   11657 
    11658 /* Special case: the 82575 needs manual initialization ... */
   11659 static void
   11660 wm_reset_init_script_82575(struct wm_softc *sc)
   11661 {
    11662 	/*
    11663 	 * Remark: this is untested code - we have no board without EEPROM.
    11664 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    11665 	 */
   11666 
   11667 	/* SerDes configuration via SERDESCTRL */
   11668 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   11669 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   11670 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   11671 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   11672 
   11673 	/* CCM configuration via CCMCTL register */
   11674 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   11675 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   11676 
   11677 	/* PCIe lanes configuration */
   11678 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   11679 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   11680 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   11681 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   11682 
   11683 	/* PCIe PLL Configuration */
   11684 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   11685 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   11686 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   11687 }
   11688 
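/*
 * wm_reset_mdicnfg_82580:
 *
 *	After a reset, reprogram the MDICNFG external/shared MDIO bits
 *	on SGMII-mode 82580 ports from the port's CFG3 word in the NVM.
 */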
   11689 static void
   11690 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   11691 {
   11692 	uint32_t reg;
   11693 	uint16_t nvmword;
   11694 	int rv;
   11695 
   11696 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11697 		return;
   11698 
   11699 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   11700 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   11701 	if (rv != 0) {
   11702 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   11703 		    __func__);
   11704 		return;
   11705 	}
   11706 
   11707 	reg = CSR_READ(sc, WMREG_MDICNFG);
   11708 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   11709 		reg |= MDICNFG_DEST;
   11710 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   11711 		reg |= MDICNFG_COM_MDIO;
   11712 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   11713 }
   11714 
   11715 /*
   11716  * I210 Errata 25 and I211 Errata 10
   11717  * Slow System Clock.
   11718  */
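/*
 * If the internal PHY's PLL comes up unconfigured (its frequency
 * register reads back as GS40G_PHY_PLL_UNCONF), retry up to
 * WM_MAX_PLL_TRIES times: reset the PHY, temporarily feed the iNVM
 * autoload word OR'ed with INVM_PLL_WO_VAL to the device via
 * WMREG_EEARBC_I210, and bounce the function through D3hot and back
 * to D0 so the configuration is reloaded.
 */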
   11719 static void
   11720 wm_pll_workaround_i210(struct wm_softc *sc)
   11721 {
   11722 	uint32_t mdicnfg, wuc;
   11723 	uint32_t reg;
   11724 	pcireg_t pcireg;
    11725 	int pmreg;
   11726 	uint16_t nvmword, tmp_nvmword;
   11727 	int phyval;
   11728 	bool wa_done = false;
   11729 	int i;
   11730 
   11731 	/* Save WUC and MDICNFG registers */
   11732 	wuc = CSR_READ(sc, WMREG_WUC);
   11733 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   11734 
   11735 	reg = mdicnfg & ~MDICNFG_DEST;
   11736 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   11737 
   11738 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   11739 		nvmword = INVM_DEFAULT_AL;
   11740 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   11741 
   11742 	/* Get Power Management cap offset */
   11743 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11744 		&pmreg, NULL) == 0)
   11745 		return;
   11746 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   11747 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   11748 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   11749 
   11750 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   11751 			break; /* OK */
   11752 		}
   11753 
   11754 		wa_done = true;
   11755 		/* Directly reset the internal PHY */
   11756 		reg = CSR_READ(sc, WMREG_CTRL);
   11757 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   11758 
   11759 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11760 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   11761 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11762 
   11763 		CSR_WRITE(sc, WMREG_WUC, 0);
   11764 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   11765 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   11766 
   11767 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   11768 		    pmreg + PCI_PMCSR);
   11769 		pcireg |= PCI_PMCSR_STATE_D3;
   11770 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   11771 		    pmreg + PCI_PMCSR, pcireg);
   11772 		delay(1000);
   11773 		pcireg &= ~PCI_PMCSR_STATE_D3;
   11774 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   11775 		    pmreg + PCI_PMCSR, pcireg);
   11776 
   11777 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   11778 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   11779 
   11780 		/* Restore WUC register */
   11781 		CSR_WRITE(sc, WMREG_WUC, wuc);
   11782 	}
   11783 
   11784 	/* Restore MDICNFG setting */
   11785 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   11786 	if (wa_done)
   11787 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   11788 }
   11789