      1 /*	$NetBSD: if_wm.c,v 1.391 2016/02/09 08:32:11 ozaki-r Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue
      78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.391 2016/02/09 08:32:11 ozaki-r Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 
    107 #include <sys/rndsource.h>
    108 
    109 #include <net/if.h>
    110 #include <net/if_dl.h>
    111 #include <net/if_media.h>
    112 #include <net/if_ether.h>
    113 
    114 #include <net/bpf.h>
    115 
    116 #include <netinet/in.h>			/* XXX for struct ip */
    117 #include <netinet/in_systm.h>		/* XXX for struct ip */
    118 #include <netinet/ip.h>			/* XXX for struct ip */
    119 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    120 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    121 
    122 #include <sys/bus.h>
    123 #include <sys/intr.h>
    124 #include <machine/endian.h>
    125 
    126 #include <dev/mii/mii.h>
    127 #include <dev/mii/miivar.h>
    128 #include <dev/mii/miidevs.h>
    129 #include <dev/mii/mii_bitbang.h>
    130 #include <dev/mii/ikphyreg.h>
    131 #include <dev/mii/igphyreg.h>
    132 #include <dev/mii/igphyvar.h>
    133 #include <dev/mii/inbmphyreg.h>
    134 
    135 #include <dev/pci/pcireg.h>
    136 #include <dev/pci/pcivar.h>
    137 #include <dev/pci/pcidevs.h>
    138 
    139 #include <dev/pci/if_wmreg.h>
    140 #include <dev/pci/if_wmvar.h>
    141 
    142 #ifdef WM_DEBUG
    143 #define	WM_DEBUG_LINK		0x01
    144 #define	WM_DEBUG_TX		0x02
    145 #define	WM_DEBUG_RX		0x04
    146 #define	WM_DEBUG_GMII		0x08
    147 #define	WM_DEBUG_MANAGE		0x10
    148 #define	WM_DEBUG_NVM		0x20
    149 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    150     | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
    151 
     152 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    153 #else
    154 #define	DPRINTF(x, y)	/* nothing */
    155 #endif /* WM_DEBUG */
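         /*
          * Usage sketch: the second argument must carry its own
          * parentheses, since DPRINTF() pastes it directly after printf:
          *
          *	DPRINTF(WM_DEBUG_LINK,
          *	    ("%s: link up\n", device_xname(sc->sc_dev)));
          */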
    156 
    157 #ifdef NET_MPSAFE
    158 #define WM_MPSAFE	1
    159 #endif
    160 
    161 /*
     162  * The maximum number of interrupt vectors this driver supports.
    163  */
    164 #define WM_MAX_NTXINTR		16
    165 #define WM_MAX_NRXINTR		16
    166 #define WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)
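         /*
          * The "+ 1" reserves one extra vector beyond the Tx and Rx
          * maximums for the link-status interrupt (wm_linkintr_msix()).
          */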
    167 
    168 /*
    169  * Transmit descriptor list size.  Due to errata, we can only have
    170  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    171  * on >= 82544.  We tell the upper layers that they can queue a lot
    172  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    173  * of them at a time.
    174  *
    175  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    176  * chains containing many small mbufs have been observed in zero-copy
    177  * situations with jumbo frames.
    178  */
    179 #define	WM_NTXSEGS		256
    180 #define	WM_IFQUEUELEN		256
    181 #define	WM_TXQUEUELEN_MAX	64
    182 #define	WM_TXQUEUELEN_MAX_82547	16
    183 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    184 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    185 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    186 #define	WM_NTXDESC_82542	256
    187 #define	WM_NTXDESC_82544	4096
    188 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    189 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    190 #define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
    191 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    192 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
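         /*
          * Both the descriptor count and the job count are powers of two
          * (see txq_ndesc and txq_num below), so the "next" macros can
          * wrap an index with a cheap AND instead of a modulo; e.g. with
          * 4096 descriptors, WM_NEXTTX() maps 4095 to (4095 + 1) & 4095
          * == 0.  WM_NEXTRX()/WM_PREVRX() below use the same trick.
          */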
    193 
    194 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    195 
    196 /*
     197  * Receive descriptor list size.  We have one Rx buffer for normal-
    198  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    199  * packet.  We allocate 256 receive descriptors, each with a 2k
    200  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    201  */
    202 #define	WM_NRXDESC		256
    203 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    204 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    205 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    206 
    207 typedef union txdescs {
    208 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    209 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    210 } txdescs_t;
    211 
     212 #define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
     213 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    214 
    215 /*
    216  * Software state for transmit jobs.
    217  */
    218 struct wm_txsoft {
    219 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    220 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    221 	int txs_firstdesc;		/* first descriptor in packet */
    222 	int txs_lastdesc;		/* last descriptor in packet */
    223 	int txs_ndesc;			/* # of descriptors used */
    224 };
    225 
    226 /*
    227  * Software state for receive buffers.  Each descriptor gets a
    228  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    229  * more than one buffer, we chain them together.
    230  */
    231 struct wm_rxsoft {
    232 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    233 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    234 };
    235 
    236 #define WM_LINKUP_TIMEOUT	50
    237 
    238 static uint16_t swfwphysem[] = {
    239 	SWFW_PHY0_SM,
    240 	SWFW_PHY1_SM,
    241 	SWFW_PHY2_SM,
    242 	SWFW_PHY3_SM
    243 };
    244 
    245 static const uint32_t wm_82580_rxpbs_table[] = {
    246 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    247 };
    248 
    249 struct wm_softc;
    250 
    251 struct wm_txqueue {
    252 	kmutex_t *txq_lock;		/* lock for tx operations */
    253 
    254 	struct wm_softc *txq_sc;
    255 
     256 	int txq_id;			/* index of this transmit queue */
     257 	int txq_intr_idx;		/* MSI-X table index for this queue */
    258 
    259 	/* Software state for the transmit descriptors. */
    260 	int txq_num;			/* must be a power of two */
    261 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    262 
    263 	/* TX control data structures. */
    264 	int txq_ndesc;			/* must be a power of two */
    265 	txdescs_t *txq_descs_u;
     266 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    267 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     268 	int txq_desc_rseg;		/* real number of control segments */
    269 	size_t txq_desc_size;		/* control data size */
    270 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    271 #define	txq_descs	txq_descs_u->sctxu_txdescs
    272 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    273 
    274 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    275 
    276 	int txq_free;			/* number of free Tx descriptors */
    277 	int txq_next;			/* next ready Tx descriptor */
    278 
    279 	int txq_sfree;			/* number of free Tx jobs */
    280 	int txq_snext;			/* next free Tx job */
    281 	int txq_sdirty;			/* dirty Tx jobs */
    282 
    283 	/* These 4 variables are used only on the 82547. */
    284 	int txq_fifo_size;		/* Tx FIFO size */
    285 	int txq_fifo_head;		/* current head of FIFO */
    286 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    287 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    288 
    289 	/* XXX which event counter is required? */
    290 };
    291 
    292 struct wm_rxqueue {
    293 	kmutex_t *rxq_lock;		/* lock for rx operations */
    294 
    295 	struct wm_softc *rxq_sc;
    296 
     297 	int rxq_id;			/* index of this receive queue */
     298 	int rxq_intr_idx;		/* MSI-X table index for this queue */
    299 
    300 	/* Software state for the receive descriptors. */
    301 	wiseman_rxdesc_t *rxq_descs;
    302 
    303 	/* RX control data structures. */
    304 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    305 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    306 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     307 	int rxq_desc_rseg;		/* real number of control segments */
    308 	size_t rxq_desc_size;		/* control data size */
    309 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    310 
    311 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    312 
    313 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    314 	int rxq_discard;
    315 	int rxq_len;
    316 	struct mbuf *rxq_head;
    317 	struct mbuf *rxq_tail;
    318 	struct mbuf **rxq_tailp;
    319 
    320 	/* XXX which event counter is required? */
    321 };
    322 
    323 /*
    324  * Software state per device.
    325  */
    326 struct wm_softc {
    327 	device_t sc_dev;		/* generic device information */
    328 	bus_space_tag_t sc_st;		/* bus space tag */
    329 	bus_space_handle_t sc_sh;	/* bus space handle */
    330 	bus_size_t sc_ss;		/* bus space size */
    331 	bus_space_tag_t sc_iot;		/* I/O space tag */
    332 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    333 	bus_size_t sc_ios;		/* I/O space size */
    334 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    335 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    336 	bus_size_t sc_flashs;		/* flash registers space size */
    337 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    338 
    339 	struct ethercom sc_ethercom;	/* ethernet common data */
    340 	struct mii_data sc_mii;		/* MII/media information */
    341 
    342 	pci_chipset_tag_t sc_pc;
    343 	pcitag_t sc_pcitag;
    344 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    345 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    346 
    347 	uint16_t sc_pcidevid;		/* PCI device ID */
    348 	wm_chip_type sc_type;		/* MAC type */
    349 	int sc_rev;			/* MAC revision */
    350 	wm_phy_type sc_phytype;		/* PHY type */
     351 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
    352 #define	WM_MEDIATYPE_UNKNOWN		0x00
    353 #define	WM_MEDIATYPE_FIBER		0x01
    354 #define	WM_MEDIATYPE_COPPER		0x02
    355 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    356 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    357 	int sc_flags;			/* flags; see below */
    358 	int sc_if_flags;		/* last if_flags */
    359 	int sc_flowflags;		/* 802.3x flow control flags */
    360 	int sc_align_tweak;
    361 
    362 	void *sc_ihs[WM_MAX_NINTR];	/*
    363 					 * interrupt cookie.
    364 					 * legacy and msi use sc_ihs[0].
    365 					 */
    366 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    367 	int sc_nintrs;			/* number of interrupts */
    368 
     369 	int sc_link_intr_idx;		/* MSI-X table index for link intr */
    370 
    371 	callout_t sc_tick_ch;		/* tick callout */
    372 	bool sc_stopping;
    373 
    374 	int sc_nvm_ver_major;
    375 	int sc_nvm_ver_minor;
    376 	int sc_nvm_ver_build;
    377 	int sc_nvm_addrbits;		/* NVM address bits */
    378 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    379 	int sc_ich8_flash_base;
    380 	int sc_ich8_flash_bank_size;
    381 	int sc_nvm_k1_enabled;
    382 
    383 	int sc_ntxqueues;
    384 	struct wm_txqueue *sc_txq;
    385 
    386 	int sc_nrxqueues;
    387 	struct wm_rxqueue *sc_rxq;
    388 
    389 #ifdef WM_EVENT_COUNTERS
    390 	/* Event counters. */
    391 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
    392 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
    393 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
    394 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
    395 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
    396 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
    397 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    398 
    399 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
    400 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
    401 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
    402 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
    403 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
    404 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
    405 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
    406 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
    407 
    408 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    409 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped(too many segs) */
    410 
    411 	struct evcnt sc_ev_tu;		/* Tx underrun */
    412 
    413 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    414 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    415 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    416 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    417 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    418 #endif /* WM_EVENT_COUNTERS */
    419 
     420 	/* This variable is used only on the 82547. */
    421 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    422 
    423 	uint32_t sc_ctrl;		/* prototype CTRL register */
    424 #if 0
    425 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    426 #endif
    427 	uint32_t sc_icr;		/* prototype interrupt bits */
    428 	uint32_t sc_itr;		/* prototype intr throttling reg */
    429 	uint32_t sc_tctl;		/* prototype TCTL register */
    430 	uint32_t sc_rctl;		/* prototype RCTL register */
    431 	uint32_t sc_txcw;		/* prototype TXCW register */
    432 	uint32_t sc_tipg;		/* prototype TIPG register */
    433 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    434 	uint32_t sc_pba;		/* prototype PBA register */
    435 
    436 	int sc_tbi_linkup;		/* TBI link status */
    437 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    438 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    439 
    440 	int sc_mchash_type;		/* multicast filter offset */
    441 
    442 	krndsource_t rnd_source;	/* random source */
    443 
    444 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    445 
    446 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    447 };
    448 
    449 #define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
    450 #define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
    451 #define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
    452 #define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
    453 #define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
    454 #define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
    455 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    456 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    457 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    458 
    459 #ifdef WM_MPSAFE
    460 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    461 #else
    462 #define CALLOUT_FLAGS	0
    463 #endif
    464 
    465 #define	WM_RXCHAIN_RESET(rxq)						\
    466 do {									\
    467 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    468 	*(rxq)->rxq_tailp = NULL;					\
    469 	(rxq)->rxq_len = 0;						\
    470 } while (/*CONSTCOND*/0)
    471 
    472 #define	WM_RXCHAIN_LINK(rxq, m)						\
    473 do {									\
    474 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    475 	(rxq)->rxq_tailp = &(m)->m_next;				\
    476 } while (/*CONSTCOND*/0)
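         /*
          * rxq_tailp always points at the location holding the chain's
          * next-pointer slot (rxq_head when the chain is empty, the last
          * mbuf's m_next otherwise), so WM_RXCHAIN_LINK() appends in
          * O(1) without walking the chain.
          */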
    477 
    478 #ifdef WM_EVENT_COUNTERS
    479 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    480 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    481 #else
    482 #define	WM_EVCNT_INCR(ev)	/* nothing */
    483 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    484 #endif
    485 
    486 #define	CSR_READ(sc, reg)						\
    487 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    488 #define	CSR_WRITE(sc, reg, val)						\
    489 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    490 #define	CSR_WRITE_FLUSH(sc)						\
    491 	(void) CSR_READ((sc), WMREG_STATUS)
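         /*
          * CSR_WRITE_FLUSH() forces posted register writes out to the
          * device by performing a dummy read of the STATUS register and
          * discarding the result.
          */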
    492 
    493 #define ICH8_FLASH_READ32(sc, reg) \
    494 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    495 #define ICH8_FLASH_WRITE32(sc, reg, data) \
    496 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    497 
    498 #define ICH8_FLASH_READ16(sc, reg) \
    499 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    500 #define ICH8_FLASH_WRITE16(sc, reg, data) \
    501 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    502 
    503 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
    504 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    505 
    506 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    507 #define	WM_CDTXADDR_HI(txq, x)						\
    508 	(sizeof(bus_addr_t) == 8 ?					\
    509 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    510 
    511 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    512 #define	WM_CDRXADDR_HI(rxq, x)						\
    513 	(sizeof(bus_addr_t) == 8 ?					\
    514 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
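         /*
          * The _LO/_HI macros split a ring's DMA base address into the
          * two 32-bit halves that the descriptor base register pairs
          * (e.g. TDBAL/TDBAH) expect; with a 32-bit bus_addr_t the high
          * half is always 0.
          */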
    515 
    516 /*
    517  * Register read/write functions.
    518  * Other than CSR_{READ|WRITE}().
    519  */
    520 #if 0
    521 static inline uint32_t wm_io_read(struct wm_softc *, int);
    522 #endif
    523 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    524 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    525 	uint32_t, uint32_t);
    526 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    527 
    528 /*
    529  * Descriptor sync/init functions.
    530  */
    531 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    532 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    533 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    534 
    535 /*
    536  * Device driver interface functions and commonly used functions.
    537  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    538  */
    539 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    540 static int	wm_match(device_t, cfdata_t, void *);
    541 static void	wm_attach(device_t, device_t, void *);
    542 static int	wm_detach(device_t, int);
    543 static bool	wm_suspend(device_t, const pmf_qual_t *);
    544 static bool	wm_resume(device_t, const pmf_qual_t *);
    545 static void	wm_watchdog(struct ifnet *);
    546 static void	wm_tick(void *);
    547 static int	wm_ifflags_cb(struct ethercom *);
    548 static int	wm_ioctl(struct ifnet *, u_long, void *);
    549 /* MAC address related */
    550 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    551 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    552 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    553 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    554 static void	wm_set_filter(struct wm_softc *);
    555 /* Reset and init related */
    556 static void	wm_set_vlan(struct wm_softc *);
    557 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    558 static void	wm_get_auto_rd_done(struct wm_softc *);
    559 static void	wm_lan_init_done(struct wm_softc *);
    560 static void	wm_get_cfg_done(struct wm_softc *);
    561 static void	wm_initialize_hardware_bits(struct wm_softc *);
    562 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    563 static void	wm_reset(struct wm_softc *);
    564 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    565 static void	wm_rxdrain(struct wm_rxqueue *);
    566 static void	wm_rss_getkey(uint8_t *);
    567 static void	wm_init_rss(struct wm_softc *);
    568 static void	wm_adjust_qnum(struct wm_softc *, int);
    569 static int	wm_setup_legacy(struct wm_softc *);
    570 static int	wm_setup_msix(struct wm_softc *);
    571 static int	wm_init(struct ifnet *);
    572 static int	wm_init_locked(struct ifnet *);
    573 static void	wm_stop(struct ifnet *, int);
    574 static void	wm_stop_locked(struct ifnet *, int);
    575 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    576 static void	wm_82547_txfifo_stall(void *);
    577 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    578 /* DMA related */
    579 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    580 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    581 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    582 static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
    583 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    584 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    585 static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
    586 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    587 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    588 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    589 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    590 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    591 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    592 static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
    593 static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
    594 static int	wm_alloc_txrx_queues(struct wm_softc *);
    595 static void	wm_free_txrx_queues(struct wm_softc *);
    596 static int	wm_init_txrx_queues(struct wm_softc *);
    597 /* Start */
    598 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    599     uint32_t *, uint8_t *);
    600 static void	wm_start(struct ifnet *);
    601 static void	wm_start_locked(struct ifnet *);
    602 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    603     uint32_t *, uint32_t *, bool *);
    604 static void	wm_nq_start(struct ifnet *);
    605 static void	wm_nq_start_locked(struct ifnet *);
    606 /* Interrupt */
    607 static int	wm_txeof(struct wm_softc *);
    608 static void	wm_rxeof(struct wm_rxqueue *);
    609 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    610 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    611 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    612 static void	wm_linkintr(struct wm_softc *, uint32_t);
    613 static int	wm_intr_legacy(void *);
    614 static int	wm_txintr_msix(void *);
    615 static int	wm_rxintr_msix(void *);
    616 static int	wm_linkintr_msix(void *);
    617 
    618 /*
    619  * Media related.
    620  * GMII, SGMII, TBI, SERDES and SFP.
    621  */
    622 /* Common */
    623 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    624 /* GMII related */
    625 static void	wm_gmii_reset(struct wm_softc *);
    626 static int	wm_get_phy_id_82575(struct wm_softc *);
    627 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    628 static int	wm_gmii_mediachange(struct ifnet *);
    629 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    630 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    631 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    632 static int	wm_gmii_i82543_readreg(device_t, int, int);
    633 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    634 static int	wm_gmii_i82544_readreg(device_t, int, int);
    635 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    636 static int	wm_gmii_i80003_readreg(device_t, int, int);
    637 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    638 static int	wm_gmii_bm_readreg(device_t, int, int);
    639 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    640 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    641 static int	wm_gmii_hv_readreg(device_t, int, int);
    642 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    643 static int	wm_gmii_82580_readreg(device_t, int, int);
    644 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    645 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    646 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    647 static void	wm_gmii_statchg(struct ifnet *);
    648 static int	wm_kmrn_readreg(struct wm_softc *, int);
    649 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    650 /* SGMII */
    651 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    652 static int	wm_sgmii_readreg(device_t, int, int);
    653 static void	wm_sgmii_writereg(device_t, int, int, int);
    654 /* TBI related */
    655 static void	wm_tbi_mediainit(struct wm_softc *);
    656 static int	wm_tbi_mediachange(struct ifnet *);
    657 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    658 static int	wm_check_for_link(struct wm_softc *);
    659 static void	wm_tbi_tick(struct wm_softc *);
    660 /* SERDES related */
    661 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    662 static int	wm_serdes_mediachange(struct ifnet *);
    663 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    664 static void	wm_serdes_tick(struct wm_softc *);
    665 /* SFP related */
    666 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    667 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    668 
    669 /*
    670  * NVM related.
     671  * Microwire, SPI (with or without EERD) and Flash.
    672  */
    673 /* Misc functions */
    674 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    675 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    676 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    677 /* Microwire */
    678 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    679 /* SPI */
    680 static int	wm_nvm_ready_spi(struct wm_softc *);
    681 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     682 /* Reading via EERD */
    683 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    684 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    685 /* Flash */
    686 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    687     unsigned int *);
    688 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    689 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    690 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    691 	uint16_t *);
    692 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    693 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    694 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    695 /* iNVM */
    696 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    697 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
     698 /* Locking, NVM type detection, checksum validation and reading */
    699 static int	wm_nvm_acquire(struct wm_softc *);
    700 static void	wm_nvm_release(struct wm_softc *);
    701 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    702 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    703 static int	wm_nvm_validate_checksum(struct wm_softc *);
    704 static void	wm_nvm_version_invm(struct wm_softc *);
    705 static void	wm_nvm_version(struct wm_softc *);
    706 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    707 
    708 /*
    709  * Hardware semaphores.
     710  * Very complex...
    711  */
    712 static int	wm_get_swsm_semaphore(struct wm_softc *);
    713 static void	wm_put_swsm_semaphore(struct wm_softc *);
    714 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    715 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    716 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    717 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    718 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    719 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    720 
    721 /*
    722  * Management mode and power management related subroutines.
    723  * BMC, AMT, suspend/resume and EEE.
    724  */
    725 #ifdef WM_WOL
    726 static int	wm_check_mng_mode(struct wm_softc *);
    727 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    728 static int	wm_check_mng_mode_82574(struct wm_softc *);
    729 static int	wm_check_mng_mode_generic(struct wm_softc *);
    730 #endif
    731 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    732 static bool	wm_phy_resetisblocked(struct wm_softc *);
    733 static void	wm_get_hw_control(struct wm_softc *);
    734 static void	wm_release_hw_control(struct wm_softc *);
    735 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
    736 static void	wm_smbustopci(struct wm_softc *);
    737 static void	wm_init_manageability(struct wm_softc *);
    738 static void	wm_release_manageability(struct wm_softc *);
    739 static void	wm_get_wakeup(struct wm_softc *);
    740 #ifdef WM_WOL
    741 static void	wm_enable_phy_wakeup(struct wm_softc *);
    742 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    743 static void	wm_enable_wakeup(struct wm_softc *);
    744 #endif
    745 /* LPLU (Low Power Link Up) */
    746 static void	wm_lplu_d0_disable(struct wm_softc *);
    747 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    748 /* EEE */
    749 static void	wm_set_eee_i350(struct wm_softc *);
    750 
    751 /*
    752  * Workarounds (mainly PHY related).
    753  * Basically, PHY's workarounds are in the PHY drivers.
    754  */
    755 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    756 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    757 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    758 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    759 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    760 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    761 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    762 static void	wm_reset_init_script_82575(struct wm_softc *);
    763 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    764 static void	wm_pll_workaround_i210(struct wm_softc *);
    765 
    766 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    767     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    768 
    769 /*
    770  * Devices supported by this driver.
    771  */
    772 static const struct wm_product {
    773 	pci_vendor_id_t		wmp_vendor;
    774 	pci_product_id_t	wmp_product;
    775 	const char		*wmp_name;
    776 	wm_chip_type		wmp_type;
    777 	uint32_t		wmp_flags;
    778 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    779 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    780 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    781 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    782 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    783 } wm_products[] = {
    784 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    785 	  "Intel i82542 1000BASE-X Ethernet",
    786 	  WM_T_82542_2_1,	WMP_F_FIBER },
    787 
    788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    789 	  "Intel i82543GC 1000BASE-X Ethernet",
    790 	  WM_T_82543,		WMP_F_FIBER },
    791 
    792 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    793 	  "Intel i82543GC 1000BASE-T Ethernet",
    794 	  WM_T_82543,		WMP_F_COPPER },
    795 
    796 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    797 	  "Intel i82544EI 1000BASE-T Ethernet",
    798 	  WM_T_82544,		WMP_F_COPPER },
    799 
    800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    801 	  "Intel i82544EI 1000BASE-X Ethernet",
    802 	  WM_T_82544,		WMP_F_FIBER },
    803 
    804 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    805 	  "Intel i82544GC 1000BASE-T Ethernet",
    806 	  WM_T_82544,		WMP_F_COPPER },
    807 
    808 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    809 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    810 	  WM_T_82544,		WMP_F_COPPER },
    811 
    812 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    813 	  "Intel i82540EM 1000BASE-T Ethernet",
    814 	  WM_T_82540,		WMP_F_COPPER },
    815 
    816 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    817 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    818 	  WM_T_82540,		WMP_F_COPPER },
    819 
    820 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    821 	  "Intel i82540EP 1000BASE-T Ethernet",
    822 	  WM_T_82540,		WMP_F_COPPER },
    823 
    824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    825 	  "Intel i82540EP 1000BASE-T Ethernet",
    826 	  WM_T_82540,		WMP_F_COPPER },
    827 
    828 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    829 	  "Intel i82540EP 1000BASE-T Ethernet",
    830 	  WM_T_82540,		WMP_F_COPPER },
    831 
    832 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    833 	  "Intel i82545EM 1000BASE-T Ethernet",
    834 	  WM_T_82545,		WMP_F_COPPER },
    835 
    836 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    837 	  "Intel i82545GM 1000BASE-T Ethernet",
    838 	  WM_T_82545_3,		WMP_F_COPPER },
    839 
    840 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    841 	  "Intel i82545GM 1000BASE-X Ethernet",
    842 	  WM_T_82545_3,		WMP_F_FIBER },
    843 
    844 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    845 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    846 	  WM_T_82545_3,		WMP_F_SERDES },
    847 
    848 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    849 	  "Intel i82546EB 1000BASE-T Ethernet",
    850 	  WM_T_82546,		WMP_F_COPPER },
    851 
    852 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    853 	  "Intel i82546EB 1000BASE-T Ethernet",
    854 	  WM_T_82546,		WMP_F_COPPER },
    855 
    856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    857 	  "Intel i82545EM 1000BASE-X Ethernet",
    858 	  WM_T_82545,		WMP_F_FIBER },
    859 
    860 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    861 	  "Intel i82546EB 1000BASE-X Ethernet",
    862 	  WM_T_82546,		WMP_F_FIBER },
    863 
    864 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    865 	  "Intel i82546GB 1000BASE-T Ethernet",
    866 	  WM_T_82546_3,		WMP_F_COPPER },
    867 
    868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    869 	  "Intel i82546GB 1000BASE-X Ethernet",
    870 	  WM_T_82546_3,		WMP_F_FIBER },
    871 
    872 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    873 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    874 	  WM_T_82546_3,		WMP_F_SERDES },
    875 
    876 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    877 	  "i82546GB quad-port Gigabit Ethernet",
    878 	  WM_T_82546_3,		WMP_F_COPPER },
    879 
    880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    881 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    882 	  WM_T_82546_3,		WMP_F_COPPER },
    883 
    884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    885 	  "Intel PRO/1000MT (82546GB)",
    886 	  WM_T_82546_3,		WMP_F_COPPER },
    887 
    888 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    889 	  "Intel i82541EI 1000BASE-T Ethernet",
    890 	  WM_T_82541,		WMP_F_COPPER },
    891 
    892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    893 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    894 	  WM_T_82541,		WMP_F_COPPER },
    895 
    896 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    897 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    898 	  WM_T_82541,		WMP_F_COPPER },
    899 
    900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    901 	  "Intel i82541ER 1000BASE-T Ethernet",
    902 	  WM_T_82541_2,		WMP_F_COPPER },
    903 
    904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    905 	  "Intel i82541GI 1000BASE-T Ethernet",
    906 	  WM_T_82541_2,		WMP_F_COPPER },
    907 
    908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    909 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    910 	  WM_T_82541_2,		WMP_F_COPPER },
    911 
    912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    913 	  "Intel i82541PI 1000BASE-T Ethernet",
    914 	  WM_T_82541_2,		WMP_F_COPPER },
    915 
    916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    917 	  "Intel i82547EI 1000BASE-T Ethernet",
    918 	  WM_T_82547,		WMP_F_COPPER },
    919 
    920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    921 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    922 	  WM_T_82547,		WMP_F_COPPER },
    923 
    924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    925 	  "Intel i82547GI 1000BASE-T Ethernet",
    926 	  WM_T_82547_2,		WMP_F_COPPER },
    927 
    928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    929 	  "Intel PRO/1000 PT (82571EB)",
    930 	  WM_T_82571,		WMP_F_COPPER },
    931 
    932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    933 	  "Intel PRO/1000 PF (82571EB)",
    934 	  WM_T_82571,		WMP_F_FIBER },
    935 
    936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    937 	  "Intel PRO/1000 PB (82571EB)",
    938 	  WM_T_82571,		WMP_F_SERDES },
    939 
    940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    941 	  "Intel PRO/1000 QT (82571EB)",
    942 	  WM_T_82571,		WMP_F_COPPER },
    943 
    944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    945 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    946 	  WM_T_82571,		WMP_F_COPPER, },
    947 
    948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
    949 	  "Intel Gigabit PT Quad Port Server ExpressModule",
    950 	  WM_T_82571,		WMP_F_COPPER, },
    951 
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
    953 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
    954 	  WM_T_82571,		WMP_F_SERDES, },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
    957 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
    958 	  WM_T_82571,		WMP_F_SERDES, },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
    961 	  "Intel 82571EB Quad 1000baseX Ethernet",
    962 	  WM_T_82571,		WMP_F_FIBER, },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    965 	  "Intel i82572EI 1000baseT Ethernet",
    966 	  WM_T_82572,		WMP_F_COPPER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    969 	  "Intel i82572EI 1000baseX Ethernet",
    970 	  WM_T_82572,		WMP_F_FIBER },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    973 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    974 	  WM_T_82572,		WMP_F_SERDES },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    977 	  "Intel i82572EI 1000baseT Ethernet",
    978 	  WM_T_82572,		WMP_F_COPPER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    981 	  "Intel i82573E",
    982 	  WM_T_82573,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    985 	  "Intel i82573E IAMT",
    986 	  WM_T_82573,		WMP_F_COPPER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
    989 	  "Intel i82573L Gigabit Ethernet",
    990 	  WM_T_82573,		WMP_F_COPPER },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
    993 	  "Intel i82574L",
    994 	  WM_T_82574,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
    997 	  "Intel i82574L",
    998 	  WM_T_82574,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1001 	  "Intel i82583V",
   1002 	  WM_T_82583,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1005 	  "i80003 dual 1000baseT Ethernet",
   1006 	  WM_T_80003,		WMP_F_COPPER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1009 	  "i80003 dual 1000baseX Ethernet",
   1010 	  WM_T_80003,		WMP_F_COPPER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1013 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1014 	  WM_T_80003,		WMP_F_SERDES },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1017 	  "Intel i80003 1000baseT Ethernet",
   1018 	  WM_T_80003,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1021 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1022 	  WM_T_80003,		WMP_F_SERDES },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1025 	  "Intel i82801H (M_AMT) LAN Controller",
   1026 	  WM_T_ICH8,		WMP_F_COPPER },
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1028 	  "Intel i82801H (AMT) LAN Controller",
   1029 	  WM_T_ICH8,		WMP_F_COPPER },
   1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1031 	  "Intel i82801H LAN Controller",
   1032 	  WM_T_ICH8,		WMP_F_COPPER },
   1033 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1034 	  "Intel i82801H (IFE) LAN Controller",
   1035 	  WM_T_ICH8,		WMP_F_COPPER },
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1037 	  "Intel i82801H (M) LAN Controller",
   1038 	  WM_T_ICH8,		WMP_F_COPPER },
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1040 	  "Intel i82801H IFE (GT) LAN Controller",
   1041 	  WM_T_ICH8,		WMP_F_COPPER },
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1043 	  "Intel i82801H IFE (G) LAN Controller",
   1044 	  WM_T_ICH8,		WMP_F_COPPER },
   1045 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1046 	  "82801I (AMT) LAN Controller",
   1047 	  WM_T_ICH9,		WMP_F_COPPER },
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1049 	  "82801I LAN Controller",
   1050 	  WM_T_ICH9,		WMP_F_COPPER },
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1052 	  "82801I (G) LAN Controller",
   1053 	  WM_T_ICH9,		WMP_F_COPPER },
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1055 	  "82801I (GT) LAN Controller",
   1056 	  WM_T_ICH9,		WMP_F_COPPER },
   1057 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1058 	  "82801I (C) LAN Controller",
   1059 	  WM_T_ICH9,		WMP_F_COPPER },
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1061 	  "82801I mobile LAN Controller",
   1062 	  WM_T_ICH9,		WMP_F_COPPER },
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1064 	  "82801I mobile (V) LAN Controller",
   1065 	  WM_T_ICH9,		WMP_F_COPPER },
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1067 	  "82801I mobile (AMT) LAN Controller",
   1068 	  WM_T_ICH9,		WMP_F_COPPER },
   1069 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1070 	  "82567LM-4 LAN Controller",
   1071 	  WM_T_ICH9,		WMP_F_COPPER },
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1073 	  "82567V-3 LAN Controller",
   1074 	  WM_T_ICH9,		WMP_F_COPPER },
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1076 	  "82567LM-2 LAN Controller",
   1077 	  WM_T_ICH10,		WMP_F_COPPER },
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1079 	  "82567LF-2 LAN Controller",
   1080 	  WM_T_ICH10,		WMP_F_COPPER },
   1081 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1082 	  "82567LM-3 LAN Controller",
   1083 	  WM_T_ICH10,		WMP_F_COPPER },
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1085 	  "82567LF-3 LAN Controller",
   1086 	  WM_T_ICH10,		WMP_F_COPPER },
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1088 	  "82567V-2 LAN Controller",
   1089 	  WM_T_ICH10,		WMP_F_COPPER },
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1091 	  "82567V-3? LAN Controller",
   1092 	  WM_T_ICH10,		WMP_F_COPPER },
   1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1094 	  "HANKSVILLE LAN Controller",
   1095 	  WM_T_ICH10,		WMP_F_COPPER },
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1097 	  "PCH LAN (82577LM) Controller",
   1098 	  WM_T_PCH,		WMP_F_COPPER },
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1100 	  "PCH LAN (82577LC) Controller",
   1101 	  WM_T_PCH,		WMP_F_COPPER },
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1103 	  "PCH LAN (82578DM) Controller",
   1104 	  WM_T_PCH,		WMP_F_COPPER },
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1106 	  "PCH LAN (82578DC) Controller",
   1107 	  WM_T_PCH,		WMP_F_COPPER },
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1109 	  "PCH2 LAN (82579LM) Controller",
   1110 	  WM_T_PCH2,		WMP_F_COPPER },
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1112 	  "PCH2 LAN (82579V) Controller",
   1113 	  WM_T_PCH2,		WMP_F_COPPER },
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1115 	  "82575EB dual-1000baseT Ethernet",
   1116 	  WM_T_82575,		WMP_F_COPPER },
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1118 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1119 	  WM_T_82575,		WMP_F_SERDES },
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1121 	  "82575GB quad-1000baseT Ethernet",
   1122 	  WM_T_82575,		WMP_F_COPPER },
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1124 	  "82575GB quad-1000baseT Ethernet (PM)",
   1125 	  WM_T_82575,		WMP_F_COPPER },
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1127 	  "82576 1000BaseT Ethernet",
   1128 	  WM_T_82576,		WMP_F_COPPER },
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1130 	  "82576 1000BaseX Ethernet",
   1131 	  WM_T_82576,		WMP_F_FIBER },
   1132 
   1133 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1134 	  "82576 gigabit Ethernet (SERDES)",
   1135 	  WM_T_82576,		WMP_F_SERDES },
   1136 
   1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1138 	  "82576 quad-1000BaseT Ethernet",
   1139 	  WM_T_82576,		WMP_F_COPPER },
   1140 
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1142 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1143 	  WM_T_82576,		WMP_F_COPPER },
   1144 
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1146 	  "82576 gigabit Ethernet",
   1147 	  WM_T_82576,		WMP_F_COPPER },
   1148 
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1150 	  "82576 gigabit Ethernet (SERDES)",
   1151 	  WM_T_82576,		WMP_F_SERDES },
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1153 	  "82576 quad-gigabit Ethernet (SERDES)",
   1154 	  WM_T_82576,		WMP_F_SERDES },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1157 	  "82580 1000BaseT Ethernet",
   1158 	  WM_T_82580,		WMP_F_COPPER },
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1160 	  "82580 1000BaseX Ethernet",
   1161 	  WM_T_82580,		WMP_F_FIBER },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1164 	  "82580 1000BaseT Ethernet (SERDES)",
   1165 	  WM_T_82580,		WMP_F_SERDES },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1168 	  "82580 gigabit Ethernet (SGMII)",
   1169 	  WM_T_82580,		WMP_F_COPPER },
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1171 	  "82580 dual-1000BaseT Ethernet",
   1172 	  WM_T_82580,		WMP_F_COPPER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1175 	  "82580 quad-1000BaseX Ethernet",
   1176 	  WM_T_82580,		WMP_F_FIBER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1179 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1180 	  WM_T_82580,		WMP_F_COPPER },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1183 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1184 	  WM_T_82580,		WMP_F_SERDES },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1187 	  "DH89XXCC 1000BASE-KX Ethernet",
   1188 	  WM_T_82580,		WMP_F_SERDES },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1191 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1192 	  WM_T_82580,		WMP_F_SERDES },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1195 	  "I350 Gigabit Network Connection",
   1196 	  WM_T_I350,		WMP_F_COPPER },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1199 	  "I350 Gigabit Fiber Network Connection",
   1200 	  WM_T_I350,		WMP_F_FIBER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1203 	  "I350 Gigabit Backplane Connection",
   1204 	  WM_T_I350,		WMP_F_SERDES },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1207 	  "I350 Quad Port Gigabit Ethernet",
   1208 	  WM_T_I350,		WMP_F_SERDES },
   1209 
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1211 	  "I350 Gigabit Connection",
   1212 	  WM_T_I350,		WMP_F_COPPER },
   1213 
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1215 	  "I354 Gigabit Ethernet (KX)",
   1216 	  WM_T_I354,		WMP_F_SERDES },
   1217 
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1219 	  "I354 Gigabit Ethernet (SGMII)",
   1220 	  WM_T_I354,		WMP_F_COPPER },
   1221 
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1223 	  "I354 Gigabit Ethernet (2.5G)",
   1224 	  WM_T_I354,		WMP_F_COPPER },
   1225 
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1227 	  "I210-T1 Ethernet Server Adapter",
   1228 	  WM_T_I210,		WMP_F_COPPER },
   1229 
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1231 	  "I210 Ethernet (Copper OEM)",
   1232 	  WM_T_I210,		WMP_F_COPPER },
   1233 
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1235 	  "I210 Ethernet (Copper IT)",
   1236 	  WM_T_I210,		WMP_F_COPPER },
   1237 
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1239 	  "I210 Ethernet (FLASH less)",
   1240 	  WM_T_I210,		WMP_F_COPPER },
   1241 
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1243 	  "I210 Gigabit Ethernet (Fiber)",
   1244 	  WM_T_I210,		WMP_F_FIBER },
   1245 
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1247 	  "I210 Gigabit Ethernet (SERDES)",
   1248 	  WM_T_I210,		WMP_F_SERDES },
   1249 
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1251 	  "I210 Gigabit Ethernet (FLASH less)",
   1252 	  WM_T_I210,		WMP_F_SERDES },
   1253 
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1255 	  "I210 Gigabit Ethernet (SGMII)",
   1256 	  WM_T_I210,		WMP_F_COPPER },
   1257 
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1259 	  "I211 Ethernet (COPPER)",
   1260 	  WM_T_I211,		WMP_F_COPPER },
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1262 	  "I217 V Ethernet Connection",
   1263 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1265 	  "I217 LM Ethernet Connection",
   1266 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1268 	  "I218 V Ethernet Connection",
   1269 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1271 	  "I218 V Ethernet Connection",
   1272 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1274 	  "I218 V Ethernet Connection",
   1275 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1277 	  "I218 LM Ethernet Connection",
   1278 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1280 	  "I218 LM Ethernet Connection",
   1281 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1283 	  "I218 LM Ethernet Connection",
   1284 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1285 	{ 0,			0,
   1286 	  NULL,
   1287 	  0,			0 },
   1288 };
   1289 
   1290 #ifdef WM_EVENT_COUNTERS
   1291 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1292 #endif /* WM_EVENT_COUNTERS */
   1293 
   1294 
   1295 /*
    1296  * Register read/write functions,
    1297  * other than CSR_{READ|WRITE}().
   1298  */
   1299 
   1300 #if 0 /* Not currently used */
   1301 static inline uint32_t
   1302 wm_io_read(struct wm_softc *sc, int reg)
   1303 {
   1304 
   1305 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1306 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1307 }
   1308 #endif
   1309 
   1310 static inline void
   1311 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1312 {
   1313 
   1314 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1315 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1316 }
   1317 
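/*
 * Illustrative sketch (not compiled): the I/O BAR is a two-register
 * indirection window.  The target register offset is written at window
 * offset 0 and the data is then read or written at window offset 4,
 * which is exactly what wm_io_read()/wm_io_write() above implement.
 * The register and value below are hypothetical example arguments.
 */
#if 0
	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
#endif
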
   1318 static inline void
   1319 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1320     uint32_t data)
   1321 {
   1322 	uint32_t regval;
   1323 	int i;
   1324 
   1325 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1326 
   1327 	CSR_WRITE(sc, reg, regval);
   1328 
   1329 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1330 		delay(5);
   1331 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1332 			break;
   1333 	}
   1334 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1335 		aprint_error("%s: WARNING:"
   1336 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1337 		    device_xname(sc->sc_dev), reg);
   1338 	}
   1339 }
   1340 
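/*
 * Illustrative usage sketch (not compiled): the 8-bit payload and the
 * register offset are packed into a single 32-bit write, after which
 * the loop above polls (5us per iteration, at most
 * SCTL_CTL_POLL_TIMEOUT iterations) for SCTL_CTL_READY.  The offset
 * 0x03 and value 0x1f here are made-up example numbers.
 */
#if 0
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x03, 0x1f);
#endif
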
   1341 static inline void
   1342 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1343 {
   1344 	wa->wa_low = htole32(v & 0xffffffffU);
   1345 	if (sizeof(bus_addr_t) == 8)
   1346 		wa->wa_high = htole32((uint64_t) v >> 32);
   1347 	else
   1348 		wa->wa_high = 0;
   1349 }
   1350 
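/*
 * Worked example (illustrative): for v == 0x0000000123456780 on a
 * platform with a 64-bit bus_addr_t, wa_low becomes htole32(0x23456780)
 * and wa_high becomes htole32(0x00000001); with a 32-bit bus_addr_t,
 * wa_high is simply forced to zero.
 */
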
   1351 /*
   1352  * Descriptor sync/init functions.
   1353  */
   1354 static inline void
   1355 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1356 {
   1357 	struct wm_softc *sc = txq->txq_sc;
   1358 
   1359 	/* If it will wrap around, sync to the end of the ring. */
   1360 	if ((start + num) > WM_NTXDESC(txq)) {
   1361 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1362 		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
   1363 		    (WM_NTXDESC(txq) - start), ops);
   1364 		num -= (WM_NTXDESC(txq) - start);
   1365 		start = 0;
   1366 	}
   1367 
   1368 	/* Now sync whatever is left. */
   1369 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1370 	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
   1371 }
   1372 
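/*
 * Illustrative example: with a 256-descriptor ring, syncing 10
 * descriptors starting at index 250 wraps around, so the function above
 * issues one bus_dmamap_sync() for descriptors 250..255 and a second
 * one for descriptors 0..3.
 */
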
   1373 static inline void
   1374 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1375 {
   1376 	struct wm_softc *sc = rxq->rxq_sc;
   1377 
   1378 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1379 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1380 }
   1381 
   1382 static inline void
   1383 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1384 {
   1385 	struct wm_softc *sc = rxq->rxq_sc;
   1386 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1387 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1388 	struct mbuf *m = rxs->rxs_mbuf;
   1389 
   1390 	/*
   1391 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1392 	 * so that the payload after the Ethernet header is aligned
   1393 	 * to a 4-byte boundary.
    1394 	 *
   1395 	 * XXX BRAINDAMAGE ALERT!
   1396 	 * The stupid chip uses the same size for every buffer, which
   1397 	 * is set in the Receive Control register.  We are using the 2K
   1398 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1399 	 * reason, we can't "scoot" packets longer than the standard
   1400 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1401 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1402 	 * the upper layer copy the headers.
   1403 	 */
   1404 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1405 
   1406 	wm_set_dma_addr(&rxd->wrx_addr,
   1407 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1408 	rxd->wrx_len = 0;
   1409 	rxd->wrx_cksum = 0;
   1410 	rxd->wrx_status = 0;
   1411 	rxd->wrx_errors = 0;
   1412 	rxd->wrx_special = 0;
   1413 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1414 
   1415 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1416 }
   1417 
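/*
 * Sketch (illustrative, not compiled) of how the 2-byte "scoot" above
 * interacts with the fixed 2K buffer size: the tweak is only usable
 * when the largest possible frame still fits in (2K - 2) bytes.  The
 * real selection is made at init time elsewhere in this driver; the
 * snippet below is a hypothetical reconstruction of that choice.
 */
#if 0
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;	/* frame wouldn't fit if shifted */
	else
		sc->sc_align_tweak = 2;	/* align the post-header payload */
#endif
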
   1418 /*
   1419  * Device driver interface functions and commonly used functions.
   1420  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1421  */
   1422 
    1423 /* Look up the device in the supported device table */
   1424 static const struct wm_product *
   1425 wm_lookup(const struct pci_attach_args *pa)
   1426 {
   1427 	const struct wm_product *wmp;
   1428 
   1429 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1430 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1431 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1432 			return wmp;
   1433 	}
   1434 	return NULL;
   1435 }
   1436 
   1437 /* The match function (ca_match) */
   1438 static int
   1439 wm_match(device_t parent, cfdata_t cf, void *aux)
   1440 {
   1441 	struct pci_attach_args *pa = aux;
   1442 
   1443 	if (wm_lookup(pa) != NULL)
   1444 		return 1;
   1445 
   1446 	return 0;
   1447 }
   1448 
   1449 /* The attach function (ca_attach) */
   1450 static void
   1451 wm_attach(device_t parent, device_t self, void *aux)
   1452 {
   1453 	struct wm_softc *sc = device_private(self);
   1454 	struct pci_attach_args *pa = aux;
   1455 	prop_dictionary_t dict;
   1456 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1457 	pci_chipset_tag_t pc = pa->pa_pc;
   1458 	int counts[PCI_INTR_TYPE_SIZE];
   1459 	pci_intr_type_t max_type;
   1460 	const char *eetype, *xname;
   1461 	bus_space_tag_t memt;
   1462 	bus_space_handle_t memh;
   1463 	bus_size_t memsize;
   1464 	int memh_valid;
   1465 	int i, error;
   1466 	const struct wm_product *wmp;
   1467 	prop_data_t ea;
   1468 	prop_number_t pn;
   1469 	uint8_t enaddr[ETHER_ADDR_LEN];
   1470 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1471 	pcireg_t preg, memtype;
   1472 	uint16_t eeprom_data, apme_mask;
   1473 	bool force_clear_smbi;
   1474 	uint32_t link_mode;
   1475 	uint32_t reg;
   1476 
   1477 	sc->sc_dev = self;
   1478 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1479 	sc->sc_stopping = false;
   1480 
   1481 	wmp = wm_lookup(pa);
   1482 #ifdef DIAGNOSTIC
   1483 	if (wmp == NULL) {
   1484 		printf("\n");
   1485 		panic("wm_attach: impossible");
   1486 	}
   1487 #endif
   1488 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1489 
   1490 	sc->sc_pc = pa->pa_pc;
   1491 	sc->sc_pcitag = pa->pa_tag;
   1492 
   1493 	if (pci_dma64_available(pa))
   1494 		sc->sc_dmat = pa->pa_dmat64;
   1495 	else
   1496 		sc->sc_dmat = pa->pa_dmat;
   1497 
   1498 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1499 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1500 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1501 
   1502 	sc->sc_type = wmp->wmp_type;
   1503 	if (sc->sc_type < WM_T_82543) {
   1504 		if (sc->sc_rev < 2) {
   1505 			aprint_error_dev(sc->sc_dev,
   1506 			    "i82542 must be at least rev. 2\n");
   1507 			return;
   1508 		}
   1509 		if (sc->sc_rev < 3)
   1510 			sc->sc_type = WM_T_82542_2_0;
   1511 	}
   1512 
   1513 	/*
   1514 	 * Disable MSI for Errata:
   1515 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1516 	 *
   1517 	 *  82544: Errata 25
   1518 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1519 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1520 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1521 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1522 	 *
   1523 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1524 	 *
   1525 	 *  82571 & 82572: Errata 63
   1526 	 */
   1527 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1528 	    || (sc->sc_type == WM_T_82572))
   1529 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1530 
   1531 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1532 	    || (sc->sc_type == WM_T_82580)
   1533 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1534 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1535 		sc->sc_flags |= WM_F_NEWQUEUE;
   1536 
   1537 	/* Set device properties (mactype) */
   1538 	dict = device_properties(sc->sc_dev);
   1539 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1540 
   1541 	/*
    1542 	 * Map the device.  All devices support memory-mapped access,
   1543 	 * and it is really required for normal operation.
   1544 	 */
   1545 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1546 	switch (memtype) {
   1547 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1548 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1549 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1550 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1551 		break;
   1552 	default:
   1553 		memh_valid = 0;
   1554 		break;
   1555 	}
   1556 
   1557 	if (memh_valid) {
   1558 		sc->sc_st = memt;
   1559 		sc->sc_sh = memh;
   1560 		sc->sc_ss = memsize;
   1561 	} else {
   1562 		aprint_error_dev(sc->sc_dev,
   1563 		    "unable to map device registers\n");
   1564 		return;
   1565 	}
   1566 
   1567 	/*
   1568 	 * In addition, i82544 and later support I/O mapped indirect
   1569 	 * register access.  It is not desirable (nor supported in
   1570 	 * this driver) to use it for normal operation, though it is
   1571 	 * required to work around bugs in some chip versions.
   1572 	 */
   1573 	if (sc->sc_type >= WM_T_82544) {
   1574 		/* First we have to find the I/O BAR. */
   1575 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1576 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1577 			if (memtype == PCI_MAPREG_TYPE_IO)
   1578 				break;
   1579 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1580 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1581 				i += 4;	/* skip high bits, too */
   1582 		}
   1583 		if (i < PCI_MAPREG_END) {
   1584 			/*
    1585 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1586 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1587 			 * That's no problem, because the newer chips don't
    1588 			 * have this bug.
    1589 			 *
    1590 			 * The i8254x apparently doesn't respond when the
    1591 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1592 			 * been configured.
   1593 			 */
   1594 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1595 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1596 				aprint_error_dev(sc->sc_dev,
   1597 				    "WARNING: I/O BAR at zero.\n");
   1598 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1599 					0, &sc->sc_iot, &sc->sc_ioh,
   1600 					NULL, &sc->sc_ios) == 0) {
   1601 				sc->sc_flags |= WM_F_IOH_VALID;
   1602 			} else {
   1603 				aprint_error_dev(sc->sc_dev,
   1604 				    "WARNING: unable to map I/O space\n");
   1605 			}
   1606 		}
   1607 
   1608 	}
   1609 
   1610 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1611 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1612 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1613 	if (sc->sc_type < WM_T_82542_2_1)
   1614 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1615 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1616 
   1617 	/* power up chip */
   1618 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1619 	    NULL)) && error != EOPNOTSUPP) {
   1620 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1621 		return;
   1622 	}
   1623 
   1624 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1625 
   1626 	/* Allocation settings */
   1627 	max_type = PCI_INTR_TYPE_MSIX;
   1628 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   1629 	counts[PCI_INTR_TYPE_MSI] = 1;
   1630 	counts[PCI_INTR_TYPE_INTX] = 1;
   1631 
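	/*
	 * Interrupt allocation falls back in stages: MSI-X is tried first;
	 * if its setup fails, the vectors are released and we retry with
	 * MSI; if that also fails, we retry once more with a single INTx
	 * line.
	 */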
   1632 alloc_retry:
   1633 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1634 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1635 		return;
   1636 	}
   1637 
   1638 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1639 		error = wm_setup_msix(sc);
   1640 		if (error) {
   1641 			pci_intr_release(pc, sc->sc_intrs,
   1642 			    counts[PCI_INTR_TYPE_MSIX]);
   1643 
   1644 			/* Setup for MSI: Disable MSI-X */
   1645 			max_type = PCI_INTR_TYPE_MSI;
   1646 			counts[PCI_INTR_TYPE_MSI] = 1;
   1647 			counts[PCI_INTR_TYPE_INTX] = 1;
   1648 			goto alloc_retry;
   1649 		}
    1650 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1651 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1652 		error = wm_setup_legacy(sc);
   1653 		if (error) {
   1654 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1655 			    counts[PCI_INTR_TYPE_MSI]);
   1656 
   1657 			/* The next try is for INTx: Disable MSI */
   1658 			max_type = PCI_INTR_TYPE_INTX;
   1659 			counts[PCI_INTR_TYPE_INTX] = 1;
   1660 			goto alloc_retry;
   1661 		}
   1662 	} else {
   1663 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1664 		error = wm_setup_legacy(sc);
   1665 		if (error) {
   1666 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1667 			    counts[PCI_INTR_TYPE_INTX]);
   1668 			return;
   1669 		}
   1670 	}
   1671 
   1672 	/*
   1673 	 * Check the function ID (unit number of the chip).
   1674 	 */
   1675 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1676 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1677 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1678 	    || (sc->sc_type == WM_T_82580)
   1679 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1680 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1681 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1682 	else
   1683 		sc->sc_funcid = 0;
   1684 
   1685 	/*
   1686 	 * Determine a few things about the bus we're connected to.
   1687 	 */
   1688 	if (sc->sc_type < WM_T_82543) {
   1689 		/* We don't really know the bus characteristics here. */
   1690 		sc->sc_bus_speed = 33;
   1691 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1692 		/*
    1693 		 * CSA (Communication Streaming Architecture) is about as fast
    1694 		 * as a 32-bit 66MHz PCI bus.
   1695 		 */
   1696 		sc->sc_flags |= WM_F_CSA;
   1697 		sc->sc_bus_speed = 66;
   1698 		aprint_verbose_dev(sc->sc_dev,
   1699 		    "Communication Streaming Architecture\n");
   1700 		if (sc->sc_type == WM_T_82547) {
   1701 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1702 			callout_setfunc(&sc->sc_txfifo_ch,
   1703 					wm_82547_txfifo_stall, sc);
   1704 			aprint_verbose_dev(sc->sc_dev,
   1705 			    "using 82547 Tx FIFO stall work-around\n");
   1706 		}
   1707 	} else if (sc->sc_type >= WM_T_82571) {
   1708 		sc->sc_flags |= WM_F_PCIE;
   1709 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1710 		    && (sc->sc_type != WM_T_ICH10)
   1711 		    && (sc->sc_type != WM_T_PCH)
   1712 		    && (sc->sc_type != WM_T_PCH2)
   1713 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1714 			/* ICH* and PCH* have no PCIe capability registers */
   1715 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1716 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1717 				NULL) == 0)
   1718 				aprint_error_dev(sc->sc_dev,
   1719 				    "unable to find PCIe capability\n");
   1720 		}
   1721 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1722 	} else {
   1723 		reg = CSR_READ(sc, WMREG_STATUS);
   1724 		if (reg & STATUS_BUS64)
   1725 			sc->sc_flags |= WM_F_BUS64;
   1726 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1727 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1728 
   1729 			sc->sc_flags |= WM_F_PCIX;
   1730 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1731 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1732 				aprint_error_dev(sc->sc_dev,
   1733 				    "unable to find PCIX capability\n");
   1734 			else if (sc->sc_type != WM_T_82545_3 &&
   1735 				 sc->sc_type != WM_T_82546_3) {
   1736 				/*
   1737 				 * Work around a problem caused by the BIOS
   1738 				 * setting the max memory read byte count
   1739 				 * incorrectly.
   1740 				 */
   1741 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1742 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1743 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1744 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1745 
   1746 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1747 				    PCIX_CMD_BYTECNT_SHIFT;
   1748 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1749 				    PCIX_STATUS_MAXB_SHIFT;
   1750 				if (bytecnt > maxb) {
   1751 					aprint_verbose_dev(sc->sc_dev,
   1752 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1753 					    512 << bytecnt, 512 << maxb);
   1754 					pcix_cmd = (pcix_cmd &
   1755 					    ~PCIX_CMD_BYTECNT_MASK) |
   1756 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1757 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1758 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1759 					    pcix_cmd);
   1760 				}
   1761 			}
   1762 		}
   1763 		/*
   1764 		 * The quad port adapter is special; it has a PCIX-PCIX
   1765 		 * bridge on the board, and can run the secondary bus at
   1766 		 * a higher speed.
   1767 		 */
   1768 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1769 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1770 								      : 66;
   1771 		} else if (sc->sc_flags & WM_F_PCIX) {
   1772 			switch (reg & STATUS_PCIXSPD_MASK) {
   1773 			case STATUS_PCIXSPD_50_66:
   1774 				sc->sc_bus_speed = 66;
   1775 				break;
   1776 			case STATUS_PCIXSPD_66_100:
   1777 				sc->sc_bus_speed = 100;
   1778 				break;
   1779 			case STATUS_PCIXSPD_100_133:
   1780 				sc->sc_bus_speed = 133;
   1781 				break;
   1782 			default:
   1783 				aprint_error_dev(sc->sc_dev,
   1784 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1785 				    reg & STATUS_PCIXSPD_MASK);
   1786 				sc->sc_bus_speed = 66;
   1787 				break;
   1788 			}
   1789 		} else
   1790 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1791 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1792 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1793 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1794 	}
   1795 
   1796 	/* clear interesting stat counters */
   1797 	CSR_READ(sc, WMREG_COLC);
   1798 	CSR_READ(sc, WMREG_RXERRC);
   1799 
   1800 	/* get PHY control from SMBus to PCIe */
   1801 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1802 	    || (sc->sc_type == WM_T_PCH_LPT))
   1803 		wm_smbustopci(sc);
   1804 
   1805 	/* Reset the chip to a known state. */
   1806 	wm_reset(sc);
   1807 
   1808 	/* Get some information about the EEPROM. */
   1809 	switch (sc->sc_type) {
   1810 	case WM_T_82542_2_0:
   1811 	case WM_T_82542_2_1:
   1812 	case WM_T_82543:
   1813 	case WM_T_82544:
   1814 		/* Microwire */
   1815 		sc->sc_nvm_wordsize = 64;
   1816 		sc->sc_nvm_addrbits = 6;
   1817 		break;
   1818 	case WM_T_82540:
   1819 	case WM_T_82545:
   1820 	case WM_T_82545_3:
   1821 	case WM_T_82546:
   1822 	case WM_T_82546_3:
   1823 		/* Microwire */
   1824 		reg = CSR_READ(sc, WMREG_EECD);
   1825 		if (reg & EECD_EE_SIZE) {
   1826 			sc->sc_nvm_wordsize = 256;
   1827 			sc->sc_nvm_addrbits = 8;
   1828 		} else {
   1829 			sc->sc_nvm_wordsize = 64;
   1830 			sc->sc_nvm_addrbits = 6;
   1831 		}
   1832 		sc->sc_flags |= WM_F_LOCK_EECD;
   1833 		break;
   1834 	case WM_T_82541:
   1835 	case WM_T_82541_2:
   1836 	case WM_T_82547:
   1837 	case WM_T_82547_2:
   1838 		sc->sc_flags |= WM_F_LOCK_EECD;
   1839 		reg = CSR_READ(sc, WMREG_EECD);
   1840 		if (reg & EECD_EE_TYPE) {
   1841 			/* SPI */
   1842 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1843 			wm_nvm_set_addrbits_size_eecd(sc);
   1844 		} else {
   1845 			/* Microwire */
   1846 			if ((reg & EECD_EE_ABITS) != 0) {
   1847 				sc->sc_nvm_wordsize = 256;
   1848 				sc->sc_nvm_addrbits = 8;
   1849 			} else {
   1850 				sc->sc_nvm_wordsize = 64;
   1851 				sc->sc_nvm_addrbits = 6;
   1852 			}
   1853 		}
   1854 		break;
   1855 	case WM_T_82571:
   1856 	case WM_T_82572:
   1857 		/* SPI */
   1858 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1859 		wm_nvm_set_addrbits_size_eecd(sc);
   1860 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1861 		break;
   1862 	case WM_T_82573:
   1863 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1864 		/* FALLTHROUGH */
   1865 	case WM_T_82574:
   1866 	case WM_T_82583:
   1867 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1868 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1869 			sc->sc_nvm_wordsize = 2048;
   1870 		} else {
   1871 			/* SPI */
   1872 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1873 			wm_nvm_set_addrbits_size_eecd(sc);
   1874 		}
   1875 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1876 		break;
   1877 	case WM_T_82575:
   1878 	case WM_T_82576:
   1879 	case WM_T_82580:
   1880 	case WM_T_I350:
   1881 	case WM_T_I354:
   1882 	case WM_T_80003:
   1883 		/* SPI */
   1884 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1885 		wm_nvm_set_addrbits_size_eecd(sc);
   1886 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1887 		    | WM_F_LOCK_SWSM;
   1888 		break;
   1889 	case WM_T_ICH8:
   1890 	case WM_T_ICH9:
   1891 	case WM_T_ICH10:
   1892 	case WM_T_PCH:
   1893 	case WM_T_PCH2:
   1894 	case WM_T_PCH_LPT:
   1895 		/* FLASH */
   1896 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1897 		sc->sc_nvm_wordsize = 2048;
   1898 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   1899 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1900 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1901 			aprint_error_dev(sc->sc_dev,
   1902 			    "can't map FLASH registers\n");
   1903 			goto out;
   1904 		}
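		/*
		 * Sketch of the arithmetic below (assuming GFPREG's low and
		 * high 13 bits hold the flash region's first and last sector
		 * numbers): the base offset is the first sector scaled by the
		 * sector size; the bank size is the sector span scaled
		 * likewise, then split across the two banks and converted
		 * from bytes to 16-bit words (hence the division by 4).
		 */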
   1905 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1906 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1907 		    ICH_FLASH_SECTOR_SIZE;
   1908 		sc->sc_ich8_flash_bank_size =
   1909 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1910 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   1911 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1912 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1913 		break;
   1914 	case WM_T_I210:
   1915 	case WM_T_I211:
   1916 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1917 			wm_nvm_set_addrbits_size_eecd(sc);
   1918 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1919 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1920 		} else {
   1921 			sc->sc_nvm_wordsize = INVM_SIZE;
   1922 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1923 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1924 		}
   1925 		break;
   1926 	default:
   1927 		break;
   1928 	}
   1929 
   1930 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1931 	switch (sc->sc_type) {
   1932 	case WM_T_82571:
   1933 	case WM_T_82572:
   1934 		reg = CSR_READ(sc, WMREG_SWSM2);
   1935 		if ((reg & SWSM2_LOCK) == 0) {
   1936 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1937 			force_clear_smbi = true;
   1938 		} else
   1939 			force_clear_smbi = false;
   1940 		break;
   1941 	case WM_T_82573:
   1942 	case WM_T_82574:
   1943 	case WM_T_82583:
   1944 		force_clear_smbi = true;
   1945 		break;
   1946 	default:
   1947 		force_clear_smbi = false;
   1948 		break;
   1949 	}
   1950 	if (force_clear_smbi) {
   1951 		reg = CSR_READ(sc, WMREG_SWSM);
   1952 		if ((reg & SWSM_SMBI) != 0)
   1953 			aprint_error_dev(sc->sc_dev,
   1954 			    "Please update the Bootagent\n");
   1955 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1956 	}
   1957 
   1958 	/*
    1959 	 * Defer printing the EEPROM type until after verifying the checksum.
   1960 	 * This allows the EEPROM type to be printed correctly in the case
   1961 	 * that no EEPROM is attached.
   1962 	 */
   1963 	/*
   1964 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1965 	 * this for later, so we can fail future reads from the EEPROM.
   1966 	 */
   1967 	if (wm_nvm_validate_checksum(sc)) {
   1968 		/*
    1969 		 * Check again because some PCI-e parts fail the
    1970 		 * first check due to the link being in a sleep state.
   1971 		 */
   1972 		if (wm_nvm_validate_checksum(sc))
   1973 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1974 	}
   1975 
   1976 	/* Set device properties (macflags) */
   1977 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1978 
   1979 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1980 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   1981 	else {
   1982 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   1983 		    sc->sc_nvm_wordsize);
   1984 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   1985 			aprint_verbose("iNVM");
   1986 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   1987 			aprint_verbose("FLASH(HW)");
   1988 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   1989 			aprint_verbose("FLASH");
   1990 		else {
   1991 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   1992 				eetype = "SPI";
   1993 			else
   1994 				eetype = "MicroWire";
   1995 			aprint_verbose("(%d address bits) %s EEPROM",
   1996 			    sc->sc_nvm_addrbits, eetype);
   1997 		}
   1998 	}
   1999 	wm_nvm_version(sc);
   2000 	aprint_verbose("\n");
   2001 
   2002 	/* Check for I21[01] PLL workaround */
   2003 	if (sc->sc_type == WM_T_I210)
   2004 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2005 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2006 		/* NVM image release 3.25 has a workaround */
   2007 		if ((sc->sc_nvm_ver_major < 3)
   2008 		    || ((sc->sc_nvm_ver_major == 3)
   2009 			&& (sc->sc_nvm_ver_minor < 25))) {
   2010 			aprint_verbose_dev(sc->sc_dev,
   2011 			    "ROM image version %d.%d is older than 3.25\n",
   2012 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2013 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2014 		}
   2015 	}
   2016 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2017 		wm_pll_workaround_i210(sc);
   2018 
   2019 	wm_get_wakeup(sc);
   2020 	switch (sc->sc_type) {
   2021 	case WM_T_82571:
   2022 	case WM_T_82572:
   2023 	case WM_T_82573:
   2024 	case WM_T_82574:
   2025 	case WM_T_82583:
   2026 	case WM_T_80003:
   2027 	case WM_T_ICH8:
   2028 	case WM_T_ICH9:
   2029 	case WM_T_ICH10:
   2030 	case WM_T_PCH:
   2031 	case WM_T_PCH2:
   2032 	case WM_T_PCH_LPT:
   2033 		/* Non-AMT based hardware can now take control from firmware */
   2034 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2035 			wm_get_hw_control(sc);
   2036 		break;
   2037 	default:
   2038 		break;
   2039 	}
   2040 
   2041 	/*
    2042 	 * Read the Ethernet address from the EEPROM, unless it was
    2043 	 * already found in the device properties.
   2044 	 */
   2045 	ea = prop_dictionary_get(dict, "mac-address");
   2046 	if (ea != NULL) {
   2047 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2048 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2049 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2050 	} else {
   2051 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2052 			aprint_error_dev(sc->sc_dev,
   2053 			    "unable to read Ethernet address\n");
   2054 			goto out;
   2055 		}
   2056 	}
   2057 
   2058 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2059 	    ether_sprintf(enaddr));
   2060 
   2061 	/*
   2062 	 * Read the config info from the EEPROM, and set up various
   2063 	 * bits in the control registers based on their contents.
   2064 	 */
   2065 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2066 	if (pn != NULL) {
   2067 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2068 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2069 	} else {
   2070 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2071 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2072 			goto out;
   2073 		}
   2074 	}
   2075 
   2076 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2077 	if (pn != NULL) {
   2078 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2079 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2080 	} else {
   2081 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2082 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2083 			goto out;
   2084 		}
   2085 	}
   2086 
   2087 	/* check for WM_F_WOL */
   2088 	switch (sc->sc_type) {
   2089 	case WM_T_82542_2_0:
   2090 	case WM_T_82542_2_1:
   2091 	case WM_T_82543:
   2092 		/* dummy? */
   2093 		eeprom_data = 0;
   2094 		apme_mask = NVM_CFG3_APME;
   2095 		break;
   2096 	case WM_T_82544:
   2097 		apme_mask = NVM_CFG2_82544_APM_EN;
   2098 		eeprom_data = cfg2;
   2099 		break;
   2100 	case WM_T_82546:
   2101 	case WM_T_82546_3:
   2102 	case WM_T_82571:
   2103 	case WM_T_82572:
   2104 	case WM_T_82573:
   2105 	case WM_T_82574:
   2106 	case WM_T_82583:
   2107 	case WM_T_80003:
   2108 	default:
   2109 		apme_mask = NVM_CFG3_APME;
   2110 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2111 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2112 		break;
   2113 	case WM_T_82575:
   2114 	case WM_T_82576:
   2115 	case WM_T_82580:
   2116 	case WM_T_I350:
   2117 	case WM_T_I354: /* XXX ok? */
   2118 	case WM_T_ICH8:
   2119 	case WM_T_ICH9:
   2120 	case WM_T_ICH10:
   2121 	case WM_T_PCH:
   2122 	case WM_T_PCH2:
   2123 	case WM_T_PCH_LPT:
   2124 		/* XXX The funcid should be checked on some devices */
   2125 		apme_mask = WUC_APME;
   2126 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2127 		break;
   2128 	}
   2129 
   2130 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2131 	if ((eeprom_data & apme_mask) != 0)
   2132 		sc->sc_flags |= WM_F_WOL;
   2133 #ifdef WM_DEBUG
   2134 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2135 		printf("WOL\n");
   2136 #endif
   2137 
   2138 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2139 		/* Check NVM for autonegotiation */
   2140 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2141 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2142 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2143 		}
   2144 	}
   2145 
   2146 	/*
    2147 	 * XXX need special handling for some multiple-port cards
    2148 	 * to disable a particular port.
   2149 	 */
   2150 
   2151 	if (sc->sc_type >= WM_T_82544) {
   2152 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2153 		if (pn != NULL) {
   2154 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2155 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2156 		} else {
   2157 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2158 				aprint_error_dev(sc->sc_dev,
   2159 				    "unable to read SWDPIN\n");
   2160 				goto out;
   2161 			}
   2162 		}
   2163 	}
   2164 
   2165 	if (cfg1 & NVM_CFG1_ILOS)
   2166 		sc->sc_ctrl |= CTRL_ILOS;
   2167 
   2168 	/*
   2169 	 * XXX
    2170 	 * This code isn't correct because pins 2 and 3 are located
    2171 	 * in different positions on newer chips. Check all the datasheets.
    2172 	 *
    2173 	 * Until this is resolved, only apply it to chips up to the 82580.
   2174 	 */
   2175 	if (sc->sc_type <= WM_T_82580) {
   2176 		if (sc->sc_type >= WM_T_82544) {
   2177 			sc->sc_ctrl |=
   2178 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2179 			    CTRL_SWDPIO_SHIFT;
   2180 			sc->sc_ctrl |=
   2181 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2182 			    CTRL_SWDPINS_SHIFT;
   2183 		} else {
   2184 			sc->sc_ctrl |=
   2185 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2186 			    CTRL_SWDPIO_SHIFT;
   2187 		}
   2188 	}
   2189 
   2190 	/* XXX For other than 82580? */
   2191 	if (sc->sc_type == WM_T_82580) {
   2192 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2193 		if (nvmword & __BIT(13))
   2194 			sc->sc_ctrl |= CTRL_ILOS;
   2195 	}
   2196 
   2197 #if 0
   2198 	if (sc->sc_type >= WM_T_82544) {
   2199 		if (cfg1 & NVM_CFG1_IPS0)
   2200 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2201 		if (cfg1 & NVM_CFG1_IPS1)
   2202 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2203 		sc->sc_ctrl_ext |=
   2204 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2205 		    CTRL_EXT_SWDPIO_SHIFT;
   2206 		sc->sc_ctrl_ext |=
   2207 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2208 		    CTRL_EXT_SWDPINS_SHIFT;
   2209 	} else {
   2210 		sc->sc_ctrl_ext |=
   2211 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2212 		    CTRL_EXT_SWDPIO_SHIFT;
   2213 	}
   2214 #endif
   2215 
   2216 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2217 #if 0
   2218 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2219 #endif
   2220 
   2221 	if (sc->sc_type == WM_T_PCH) {
   2222 		uint16_t val;
   2223 
   2224 		/* Save the NVM K1 bit setting */
   2225 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2226 
   2227 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2228 			sc->sc_nvm_k1_enabled = 1;
   2229 		else
   2230 			sc->sc_nvm_k1_enabled = 0;
   2231 	}
   2232 
   2233 	/*
    2234 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2235 	 * media structures accordingly.
   2236 	 */
   2237 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2238 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2239 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2240 	    || sc->sc_type == WM_T_82573
   2241 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2242 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2243 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2244 	} else if (sc->sc_type < WM_T_82543 ||
   2245 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2246 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2247 			aprint_error_dev(sc->sc_dev,
   2248 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2249 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2250 		}
   2251 		wm_tbi_mediainit(sc);
   2252 	} else {
   2253 		switch (sc->sc_type) {
   2254 		case WM_T_82575:
   2255 		case WM_T_82576:
   2256 		case WM_T_82580:
   2257 		case WM_T_I350:
   2258 		case WM_T_I354:
   2259 		case WM_T_I210:
   2260 		case WM_T_I211:
   2261 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2262 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2263 			switch (link_mode) {
   2264 			case CTRL_EXT_LINK_MODE_1000KX:
   2265 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2266 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2267 				break;
   2268 			case CTRL_EXT_LINK_MODE_SGMII:
   2269 				if (wm_sgmii_uses_mdio(sc)) {
   2270 					aprint_verbose_dev(sc->sc_dev,
   2271 					    "SGMII(MDIO)\n");
   2272 					sc->sc_flags |= WM_F_SGMII;
   2273 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2274 					break;
   2275 				}
   2276 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2277 				/*FALLTHROUGH*/
   2278 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2279 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2280 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2281 					if (link_mode
   2282 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2283 						sc->sc_mediatype
   2284 						    = WM_MEDIATYPE_COPPER;
   2285 						sc->sc_flags |= WM_F_SGMII;
   2286 					} else {
   2287 						sc->sc_mediatype
   2288 						    = WM_MEDIATYPE_SERDES;
   2289 						aprint_verbose_dev(sc->sc_dev,
   2290 						    "SERDES\n");
   2291 					}
   2292 					break;
   2293 				}
   2294 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2295 					aprint_verbose_dev(sc->sc_dev,
   2296 					    "SERDES\n");
   2297 
   2298 				/* Change current link mode setting */
   2299 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2300 				switch (sc->sc_mediatype) {
   2301 				case WM_MEDIATYPE_COPPER:
   2302 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2303 					break;
   2304 				case WM_MEDIATYPE_SERDES:
   2305 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2306 					break;
   2307 				default:
   2308 					break;
   2309 				}
   2310 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2311 				break;
   2312 			case CTRL_EXT_LINK_MODE_GMII:
   2313 			default:
   2314 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2315 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2316 				break;
   2317 			}
   2318 
    2319 			/* Leave I2C enabled only when using SGMII. */
    2320 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2321 				reg |= CTRL_EXT_I2C_ENA;
    2322 			else
    2323 				reg &= ~CTRL_EXT_I2C_ENA;
   2324 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2325 
   2326 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2327 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2328 			else
   2329 				wm_tbi_mediainit(sc);
   2330 			break;
   2331 		default:
   2332 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2333 				aprint_error_dev(sc->sc_dev,
   2334 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2335 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2336 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2337 		}
   2338 	}
   2339 
   2340 	ifp = &sc->sc_ethercom.ec_if;
   2341 	xname = device_xname(sc->sc_dev);
   2342 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2343 	ifp->if_softc = sc;
   2344 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2345 	ifp->if_ioctl = wm_ioctl;
   2346 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2347 		ifp->if_start = wm_nq_start;
   2348 	else
   2349 		ifp->if_start = wm_start;
   2350 	ifp->if_watchdog = wm_watchdog;
   2351 	ifp->if_init = wm_init;
   2352 	ifp->if_stop = wm_stop;
   2353 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2354 	IFQ_SET_READY(&ifp->if_snd);
   2355 
   2356 	/* Check for jumbo frame */
   2357 	switch (sc->sc_type) {
   2358 	case WM_T_82573:
   2359 		/* XXX limited to 9234 if ASPM is disabled */
   2360 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2361 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2362 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2363 		break;
   2364 	case WM_T_82571:
   2365 	case WM_T_82572:
   2366 	case WM_T_82574:
   2367 	case WM_T_82575:
   2368 	case WM_T_82576:
   2369 	case WM_T_82580:
   2370 	case WM_T_I350:
    2371 	case WM_T_I354: /* XXX ok? */
   2372 	case WM_T_I210:
   2373 	case WM_T_I211:
   2374 	case WM_T_80003:
   2375 	case WM_T_ICH9:
   2376 	case WM_T_ICH10:
   2377 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2378 	case WM_T_PCH_LPT:
   2379 		/* XXX limited to 9234 */
   2380 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2381 		break;
   2382 	case WM_T_PCH:
   2383 		/* XXX limited to 4096 */
   2384 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2385 		break;
   2386 	case WM_T_82542_2_0:
   2387 	case WM_T_82542_2_1:
   2388 	case WM_T_82583:
   2389 	case WM_T_ICH8:
   2390 		/* No support for jumbo frame */
   2391 		break;
   2392 	default:
   2393 		/* ETHER_MAX_LEN_JUMBO */
   2394 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2395 		break;
   2396 	}
   2397 
    2398 	/* If we're an i82543 or greater, we can support VLANs. */
   2399 	if (sc->sc_type >= WM_T_82543)
   2400 		sc->sc_ethercom.ec_capabilities |=
   2401 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2402 
   2403 	/*
    2404 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2405 	 * on i82543 and later.
   2406 	 */
   2407 	if (sc->sc_type >= WM_T_82543) {
   2408 		ifp->if_capabilities |=
   2409 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2410 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2411 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2412 		    IFCAP_CSUM_TCPv6_Tx |
   2413 		    IFCAP_CSUM_UDPv6_Tx;
   2414 	}
   2415 
   2416 	/*
    2417 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2418 	 *
   2419 	 *	82541GI (8086:1076) ... no
   2420 	 *	82572EI (8086:10b9) ... yes
   2421 	 */
   2422 	if (sc->sc_type >= WM_T_82571) {
   2423 		ifp->if_capabilities |=
   2424 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2425 	}
   2426 
   2427 	/*
    2428 	 * If we're an i82544 or greater (except the i82547), we can do
   2429 	 * TCP segmentation offload.
   2430 	 */
   2431 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2432 		ifp->if_capabilities |= IFCAP_TSOv4;
   2433 	}
   2434 
   2435 	if (sc->sc_type >= WM_T_82571) {
   2436 		ifp->if_capabilities |= IFCAP_TSOv6;
   2437 	}
   2438 
   2439 #ifdef WM_MPSAFE
   2440 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2441 #else
   2442 	sc->sc_core_lock = NULL;
   2443 #endif
   2444 
   2445 	/* Attach the interface. */
   2446 	if_initialize(ifp);
   2447 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2448 	ether_ifattach(ifp, enaddr);
   2449 	if_register(ifp);
   2450 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2451 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2452 			  RND_FLAG_DEFAULT);
   2453 
   2454 #ifdef WM_EVENT_COUNTERS
   2455 	/* Attach event counters. */
   2456 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2457 	    NULL, xname, "txsstall");
   2458 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2459 	    NULL, xname, "txdstall");
   2460 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2461 	    NULL, xname, "txfifo_stall");
   2462 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2463 	    NULL, xname, "txdw");
   2464 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2465 	    NULL, xname, "txqe");
   2466 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2467 	    NULL, xname, "rxintr");
   2468 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2469 	    NULL, xname, "linkintr");
   2470 
   2471 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2472 	    NULL, xname, "rxipsum");
   2473 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2474 	    NULL, xname, "rxtusum");
   2475 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2476 	    NULL, xname, "txipsum");
   2477 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2478 	    NULL, xname, "txtusum");
   2479 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2480 	    NULL, xname, "txtusum6");
   2481 
   2482 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2483 	    NULL, xname, "txtso");
   2484 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2485 	    NULL, xname, "txtso6");
   2486 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2487 	    NULL, xname, "txtsopain");
   2488 
   2489 	for (i = 0; i < WM_NTXSEGS; i++) {
   2490 		snprintf(wm_txseg_evcnt_names[i],
   2491 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2492 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2493 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2494 	}
   2495 
   2496 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2497 	    NULL, xname, "txdrop");
   2498 
   2499 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2500 	    NULL, xname, "tu");
   2501 
   2502 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2503 	    NULL, xname, "tx_xoff");
   2504 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2505 	    NULL, xname, "tx_xon");
   2506 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2507 	    NULL, xname, "rx_xoff");
   2508 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2509 	    NULL, xname, "rx_xon");
   2510 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2511 	    NULL, xname, "rx_macctl");
   2512 #endif /* WM_EVENT_COUNTERS */
   2513 
   2514 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2515 		pmf_class_network_register(self, ifp);
   2516 	else
   2517 		aprint_error_dev(self, "couldn't establish power handler\n");
   2518 
   2519 	sc->sc_flags |= WM_F_ATTACHED;
   2520  out:
   2521 	return;
   2522 }
   2523 
   2524 /* The detach function (ca_detach) */
   2525 static int
   2526 wm_detach(device_t self, int flags __unused)
   2527 {
   2528 	struct wm_softc *sc = device_private(self);
   2529 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2530 	int i;
   2531 #ifndef WM_MPSAFE
   2532 	int s;
   2533 #endif
   2534 
   2535 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2536 		return 0;
   2537 
   2538 #ifndef WM_MPSAFE
   2539 	s = splnet();
   2540 #endif
    2541 	/* Stop the interface; callouts are stopped inside wm_stop(). */
   2542 	wm_stop(ifp, 1);
   2543 
   2544 #ifndef WM_MPSAFE
   2545 	splx(s);
   2546 #endif
   2547 
   2548 	pmf_device_deregister(self);
   2549 
   2550 	/* Tell the firmware about the release */
   2551 	WM_CORE_LOCK(sc);
   2552 	wm_release_manageability(sc);
   2553 	wm_release_hw_control(sc);
   2554 	WM_CORE_UNLOCK(sc);
   2555 
   2556 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2557 
   2558 	/* Delete all remaining media. */
   2559 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2560 
   2561 	ether_ifdetach(ifp);
   2562 	if_detach(ifp);
   2563 	if_percpuq_destroy(sc->sc_ipq);
   2564 
   2565 	/* Unload RX dmamaps and free mbufs */
   2566 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   2567 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   2568 		WM_RX_LOCK(rxq);
   2569 		wm_rxdrain(rxq);
   2570 		WM_RX_UNLOCK(rxq);
   2571 	}
   2572 	/* Must unlock here */
   2573 
   2574 	wm_free_txrx_queues(sc);
   2575 
   2576 	/* Disestablish the interrupt handler */
   2577 	for (i = 0; i < sc->sc_nintrs; i++) {
   2578 		if (sc->sc_ihs[i] != NULL) {
   2579 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2580 			sc->sc_ihs[i] = NULL;
   2581 		}
   2582 	}
   2583 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2584 
   2585 	/* Unmap the registers */
   2586 	if (sc->sc_ss) {
   2587 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2588 		sc->sc_ss = 0;
   2589 	}
   2590 	if (sc->sc_ios) {
   2591 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2592 		sc->sc_ios = 0;
   2593 	}
   2594 	if (sc->sc_flashs) {
   2595 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2596 		sc->sc_flashs = 0;
   2597 	}
   2598 
   2599 	if (sc->sc_core_lock)
   2600 		mutex_obj_free(sc->sc_core_lock);
   2601 
   2602 	return 0;
   2603 }
   2604 
   2605 static bool
   2606 wm_suspend(device_t self, const pmf_qual_t *qual)
   2607 {
   2608 	struct wm_softc *sc = device_private(self);
   2609 
   2610 	wm_release_manageability(sc);
   2611 	wm_release_hw_control(sc);
   2612 #ifdef WM_WOL
   2613 	wm_enable_wakeup(sc);
   2614 #endif
   2615 
   2616 	return true;
   2617 }
   2618 
   2619 static bool
   2620 wm_resume(device_t self, const pmf_qual_t *qual)
   2621 {
   2622 	struct wm_softc *sc = device_private(self);
   2623 
   2624 	wm_init_manageability(sc);
   2625 
   2626 	return true;
   2627 }
   2628 
   2629 /*
   2630  * wm_watchdog:		[ifnet interface function]
   2631  *
   2632  *	Watchdog timer handler.
   2633  */
   2634 static void
   2635 wm_watchdog(struct ifnet *ifp)
   2636 {
   2637 	struct wm_softc *sc = ifp->if_softc;
   2638 	struct wm_txqueue *txq = &sc->sc_txq[0];
   2639 
   2640 	/*
   2641 	 * Since we're using delayed interrupts, sweep up
   2642 	 * before we report an error.
   2643 	 */
   2644 	WM_TX_LOCK(txq);
   2645 	wm_txeof(sc);
   2646 	WM_TX_UNLOCK(txq);
   2647 
   2648 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2649 #ifdef WM_DEBUG
   2650 		int i, j;
   2651 		struct wm_txsoft *txs;
   2652 #endif
   2653 		log(LOG_ERR,
   2654 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2655 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2656 		    txq->txq_next);
   2657 		ifp->if_oerrors++;
   2658 #ifdef WM_DEBUG
    2659 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2660 		    i = WM_NEXTTXS(txq, i)) {
    2661 			txs = &txq->txq_soft[i];
    2662 			printf("txs %d tx %d -> %d\n",
    2663 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2664 			for (j = txs->txs_firstdesc; ;
    2665 			    j = WM_NEXTTX(txq, j)) {
    2666 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2667 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2668 				printf("\t %#08x%08x\n",
    2669 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2670 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2671 				if (j == txs->txs_lastdesc)
    2672 					break;
    2673 			}
    2674 		}
   2675 #endif
   2676 		/* Reset the interface. */
   2677 		(void) wm_init(ifp);
   2678 	}
   2679 
   2680 	/* Try to get more packets going. */
   2681 	ifp->if_start(ifp);
   2682 }
   2683 
   2684 /*
   2685  * wm_tick:
   2686  *
   2687  *	One second timer, used to check link status, sweep up
   2688  *	completed transmit jobs, etc.
   2689  */
   2690 static void
   2691 wm_tick(void *arg)
   2692 {
   2693 	struct wm_softc *sc = arg;
   2694 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2695 #ifndef WM_MPSAFE
   2696 	int s;
   2697 
   2698 	s = splnet();
   2699 #endif
   2700 
   2701 	WM_CORE_LOCK(sc);
   2702 
   2703 	if (sc->sc_stopping)
   2704 		goto out;
   2705 
   2706 	if (sc->sc_type >= WM_T_82542_2_1) {
   2707 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2708 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2709 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2710 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2711 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2712 	}
   2713 
   2714 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2715 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2716 	    + CSR_READ(sc, WMREG_CRCERRS)
   2717 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2718 	    + CSR_READ(sc, WMREG_SYMERRC)
   2719 	    + CSR_READ(sc, WMREG_RXERRC)
   2720 	    + CSR_READ(sc, WMREG_SEC)
   2721 	    + CSR_READ(sc, WMREG_CEXTERR)
   2722 	    + CSR_READ(sc, WMREG_RLEC);
   2723 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2724 
   2725 	if (sc->sc_flags & WM_F_HAS_MII)
   2726 		mii_tick(&sc->sc_mii);
   2727 	else if ((sc->sc_type >= WM_T_82575)
   2728 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2729 		wm_serdes_tick(sc);
   2730 	else
   2731 		wm_tbi_tick(sc);
   2732 
   2733 out:
   2734 	WM_CORE_UNLOCK(sc);
   2735 #ifndef WM_MPSAFE
   2736 	splx(s);
   2737 #endif
   2738 
   2739 	if (!sc->sc_stopping)
   2740 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2741 }
   2742 
   2743 static int
   2744 wm_ifflags_cb(struct ethercom *ec)
   2745 {
   2746 	struct ifnet *ifp = &ec->ec_if;
   2747 	struct wm_softc *sc = ifp->if_softc;
   2748 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2749 	int rc = 0;
   2750 
   2751 	WM_CORE_LOCK(sc);
   2752 
   2753 	if (change != 0)
   2754 		sc->sc_if_flags = ifp->if_flags;
   2755 
   2756 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2757 		rc = ENETRESET;
   2758 		goto out;
   2759 	}
   2760 
   2761 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2762 		wm_set_filter(sc);
   2763 
   2764 	wm_set_vlan(sc);
   2765 
   2766 out:
   2767 	WM_CORE_UNLOCK(sc);
   2768 
   2769 	return rc;
   2770 }
   2771 
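/*
 * Note (illustrative): returning ENETRESET from this callback tells the
 * caller that a full reinit is required, while changes limited to
 * IFF_PROMISC/IFF_ALLMULTI are handled in place above by reprogramming
 * the receive filter.
 */
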
   2772 /*
   2773  * wm_ioctl:		[ifnet interface function]
   2774  *
   2775  *	Handle control requests from the operator.
   2776  */
   2777 static int
   2778 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2779 {
   2780 	struct wm_softc *sc = ifp->if_softc;
   2781 	struct ifreq *ifr = (struct ifreq *) data;
   2782 	struct ifaddr *ifa = (struct ifaddr *)data;
   2783 	struct sockaddr_dl *sdl;
   2784 	int s, error;
   2785 
   2786 #ifndef WM_MPSAFE
   2787 	s = splnet();
   2788 #endif
   2789 	switch (cmd) {
   2790 	case SIOCSIFMEDIA:
   2791 	case SIOCGIFMEDIA:
   2792 		WM_CORE_LOCK(sc);
   2793 		/* Flow control requires full-duplex mode. */
   2794 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2795 		    (ifr->ifr_media & IFM_FDX) == 0)
   2796 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2797 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2798 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2799 				/* We can do both TXPAUSE and RXPAUSE. */
   2800 				ifr->ifr_media |=
   2801 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2802 			}
   2803 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2804 		}
   2805 		WM_CORE_UNLOCK(sc);
   2806 #ifdef WM_MPSAFE
   2807 		s = splnet();
   2808 #endif
   2809 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2810 #ifdef WM_MPSAFE
   2811 		splx(s);
   2812 #endif
   2813 		break;
   2814 	case SIOCINITIFADDR:
   2815 		WM_CORE_LOCK(sc);
   2816 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2817 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2818 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2819 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2820 			/* unicast address is first multicast entry */
   2821 			wm_set_filter(sc);
   2822 			error = 0;
   2823 			WM_CORE_UNLOCK(sc);
   2824 			break;
   2825 		}
   2826 		WM_CORE_UNLOCK(sc);
   2827 		/*FALLTHROUGH*/
   2828 	default:
   2829 #ifdef WM_MPSAFE
   2830 		s = splnet();
   2831 #endif
   2832 		/* It may call wm_start, so unlock here */
   2833 		error = ether_ioctl(ifp, cmd, data);
   2834 #ifdef WM_MPSAFE
   2835 		splx(s);
   2836 #endif
   2837 		if (error != ENETRESET)
   2838 			break;
   2839 
   2840 		error = 0;
   2841 
   2842 		if (cmd == SIOCSIFCAP) {
   2843 			error = (*ifp->if_init)(ifp);
   2844 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2845 			;
   2846 		else if (ifp->if_flags & IFF_RUNNING) {
   2847 			/*
   2848 			 * Multicast list has changed; set the hardware filter
   2849 			 * accordingly.
   2850 			 */
   2851 			WM_CORE_LOCK(sc);
   2852 			wm_set_filter(sc);
   2853 			WM_CORE_UNLOCK(sc);
   2854 		}
   2855 		break;
   2856 	}
   2857 
   2858 #ifndef WM_MPSAFE
   2859 	splx(s);
   2860 #endif
   2861 	return error;
   2862 }
   2863 
   2864 /* MAC address related */
   2865 
   2866 /*
    2867  * Get the offset of the MAC address and return it.
    2868  * If an error occurred, return offset 0.
   2869  */
   2870 static uint16_t
   2871 wm_check_alt_mac_addr(struct wm_softc *sc)
   2872 {
   2873 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2874 	uint16_t offset = NVM_OFF_MACADDR;
   2875 
   2876 	/* Try to read alternative MAC address pointer */
   2877 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2878 		return 0;
   2879 
    2880 	/* Check whether the pointer is valid. */
   2881 	if ((offset == 0x0000) || (offset == 0xffff))
   2882 		return 0;
   2883 
   2884 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2885 	/*
    2886 	 * Check whether the alternative MAC address is valid.
    2887 	 * Some cards have a non-0xffff pointer but don't actually
    2888 	 * use an alternative MAC address.
    2889 	 *
    2890 	 * A valid unicast address must not have the multicast bit set.
   2891 	 */
   2892 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2893 		if (((myea[0] & 0xff) & 0x01) == 0)
   2894 			return offset; /* Found */
   2895 
   2896 	/* Not found */
   2897 	return 0;
   2898 }
   2899 
   2900 static int
   2901 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2902 {
   2903 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2904 	uint16_t offset = NVM_OFF_MACADDR;
   2905 	int do_invert = 0;
   2906 
   2907 	switch (sc->sc_type) {
   2908 	case WM_T_82580:
   2909 	case WM_T_I350:
   2910 	case WM_T_I354:
   2911 		/* EEPROM Top Level Partitioning */
   2912 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2913 		break;
   2914 	case WM_T_82571:
   2915 	case WM_T_82575:
   2916 	case WM_T_82576:
   2917 	case WM_T_80003:
   2918 	case WM_T_I210:
   2919 	case WM_T_I211:
   2920 		offset = wm_check_alt_mac_addr(sc);
   2921 		if (offset == 0)
   2922 			if ((sc->sc_funcid & 0x01) == 1)
   2923 				do_invert = 1;
   2924 		break;
   2925 	default:
   2926 		if ((sc->sc_funcid & 0x01) == 1)
   2927 			do_invert = 1;
   2928 		break;
   2929 	}
   2930 
   2931 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2932 		myea) != 0)
   2933 		goto bad;
   2934 
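         	/*
         	 * Each 16-bit NVM word stores two bytes of the MAC address
         	 * in little-endian order: word 0 holds bytes 0 and 1, and
         	 * so on.
         	 */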
   2935 	enaddr[0] = myea[0] & 0xff;
   2936 	enaddr[1] = myea[0] >> 8;
   2937 	enaddr[2] = myea[1] & 0xff;
   2938 	enaddr[3] = myea[1] >> 8;
   2939 	enaddr[4] = myea[2] & 0xff;
   2940 	enaddr[5] = myea[2] >> 8;
   2941 
   2942 	/*
   2943 	 * Toggle the LSB of the MAC address on the second port
   2944 	 * of some dual port cards.
   2945 	 */
   2946 	if (do_invert != 0)
   2947 		enaddr[5] ^= 1;
   2948 
   2949 	return 0;
   2950 
   2951  bad:
   2952 	return -1;
   2953 }
   2954 
   2955 /*
   2956  * wm_set_ral:
   2957  *
    2958  *	Set an entry in the receive address list.
   2959  */
   2960 static void
   2961 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2962 {
   2963 	uint32_t ral_lo, ral_hi;
   2964 
   2965 	if (enaddr != NULL) {
   2966 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2967 		    (enaddr[3] << 24);
   2968 		ral_hi = enaddr[4] | (enaddr[5] << 8);
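         		/* Mark the entry as valid (address-valid bit) */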
   2969 		ral_hi |= RAL_AV;
   2970 	} else {
   2971 		ral_lo = 0;
   2972 		ral_hi = 0;
   2973 	}
   2974 
   2975 	if (sc->sc_type >= WM_T_82544) {
   2976 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2977 		    ral_lo);
   2978 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2979 		    ral_hi);
   2980 	} else {
   2981 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2982 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2983 	}
   2984 }
   2985 
   2986 /*
   2987  * wm_mchash:
   2988  *
   2989  *	Compute the hash of the multicast address for the 4096-bit
   2990  *	multicast filter.
   2991  */
   2992 static uint32_t
   2993 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2994 {
   2995 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2996 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2997 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   2998 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   2999 	uint32_t hash;
   3000 
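         	/*
         	 * The hash is built from bits of the two most significant
         	 * address bytes; sc_mchash_type selects which bits.  The
         	 * ICH/PCH chips have a smaller (1024-bit) multicast table,
         	 * so they use a 10-bit hash instead of a 12-bit one.
         	 */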
   3001 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3002 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3003 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   3004 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3005 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3006 		return (hash & 0x3ff);
   3007 	}
   3008 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3009 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3010 
   3011 	return (hash & 0xfff);
   3012 }
   3013 
   3014 /*
   3015  * wm_set_filter:
   3016  *
   3017  *	Set up the receive filter.
   3018  */
   3019 static void
   3020 wm_set_filter(struct wm_softc *sc)
   3021 {
   3022 	struct ethercom *ec = &sc->sc_ethercom;
   3023 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3024 	struct ether_multi *enm;
   3025 	struct ether_multistep step;
   3026 	bus_addr_t mta_reg;
   3027 	uint32_t hash, reg, bit;
   3028 	int i, size, ralmax;
   3029 
   3030 	if (sc->sc_type >= WM_T_82544)
   3031 		mta_reg = WMREG_CORDOVA_MTA;
   3032 	else
   3033 		mta_reg = WMREG_MTA;
   3034 
   3035 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3036 
   3037 	if (ifp->if_flags & IFF_BROADCAST)
   3038 		sc->sc_rctl |= RCTL_BAM;
   3039 	if (ifp->if_flags & IFF_PROMISC) {
   3040 		sc->sc_rctl |= RCTL_UPE;
   3041 		goto allmulti;
   3042 	}
   3043 
   3044 	/*
   3045 	 * Set the station address in the first RAL slot, and
   3046 	 * clear the remaining slots.
   3047 	 */
   3048 	if (sc->sc_type == WM_T_ICH8)
    3049 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3050 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3051 	    || (sc->sc_type == WM_T_PCH))
   3052 		size = WM_RAL_TABSIZE_ICH8;
   3053 	else if (sc->sc_type == WM_T_PCH2)
   3054 		size = WM_RAL_TABSIZE_PCH2;
   3055 	else if (sc->sc_type == WM_T_PCH_LPT)
   3056 		size = WM_RAL_TABSIZE_PCH_LPT;
   3057 	else if (sc->sc_type == WM_T_82575)
   3058 		size = WM_RAL_TABSIZE_82575;
   3059 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3060 		size = WM_RAL_TABSIZE_82576;
   3061 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3062 		size = WM_RAL_TABSIZE_I350;
   3063 	else
   3064 		size = WM_RAL_TABSIZE;
   3065 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3066 
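         	/*
         	 * On PCH_LPT, the FWSM WLOCK_MAC field reports how many
         	 * receive address registers the firmware allows the driver
         	 * to modify: 0 means all entries, 1 means RAR[0] only, and
         	 * other values give the number of usable SHRA entries in
         	 * addition to RAR[0].
         	 */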
   3067 	if (sc->sc_type == WM_T_PCH_LPT) {
   3068 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3069 		switch (i) {
   3070 		case 0:
   3071 			/* We can use all entries */
   3072 			ralmax = size;
   3073 			break;
   3074 		case 1:
   3075 			/* Only RAR[0] */
   3076 			ralmax = 1;
   3077 			break;
   3078 		default:
   3079 			/* available SHRA + RAR[0] */
   3080 			ralmax = i + 1;
   3081 		}
   3082 	} else
   3083 		ralmax = size;
   3084 	for (i = 1; i < size; i++) {
   3085 		if (i < ralmax)
   3086 			wm_set_ral(sc, NULL, i);
   3087 	}
   3088 
   3089 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3090 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3091 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   3092 		size = WM_ICH8_MC_TABSIZE;
   3093 	else
   3094 		size = WM_MC_TABSIZE;
   3095 	/* Clear out the multicast table. */
   3096 	for (i = 0; i < size; i++)
   3097 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3098 
   3099 	ETHER_FIRST_MULTI(step, ec, enm);
   3100 	while (enm != NULL) {
   3101 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3102 			/*
   3103 			 * We must listen to a range of multicast addresses.
   3104 			 * For now, just accept all multicasts, rather than
   3105 			 * trying to set only those filter bits needed to match
   3106 			 * the range.  (At this time, the only use of address
   3107 			 * ranges is for IP multicast routing, for which the
   3108 			 * range is big enough to require all bits set.)
   3109 			 */
   3110 			goto allmulti;
   3111 		}
   3112 
   3113 		hash = wm_mchash(sc, enm->enm_addrlo);
   3114 
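         	/*
         	 * The hash selects a single bit in the multicast table:
         	 * the upper bits pick the 32-bit MTA register and the low
         	 * five bits pick the bit within it.
         	 */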
   3115 		reg = (hash >> 5);
   3116 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3117 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3118 		    || (sc->sc_type == WM_T_PCH2)
   3119 		    || (sc->sc_type == WM_T_PCH_LPT))
   3120 			reg &= 0x1f;
   3121 		else
   3122 			reg &= 0x7f;
   3123 		bit = hash & 0x1f;
   3124 
   3125 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3126 		hash |= 1U << bit;
   3127 
   3128 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3129 			/*
   3130 			 * 82544 Errata 9: Certain register cannot be written
   3131 			 * with particular alignments in PCI-X bus operation
   3132 			 * (FCAH, MTA and VFTA).
   3133 			 */
   3134 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3135 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3136 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3137 		} else
   3138 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3139 
   3140 		ETHER_NEXT_MULTI(step, enm);
   3141 	}
   3142 
   3143 	ifp->if_flags &= ~IFF_ALLMULTI;
   3144 	goto setit;
   3145 
   3146  allmulti:
   3147 	ifp->if_flags |= IFF_ALLMULTI;
   3148 	sc->sc_rctl |= RCTL_MPE;
   3149 
   3150  setit:
   3151 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3152 }
   3153 
   3154 /* Reset and init related */
   3155 
   3156 static void
   3157 wm_set_vlan(struct wm_softc *sc)
   3158 {
   3159 	/* Deal with VLAN enables. */
   3160 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3161 		sc->sc_ctrl |= CTRL_VME;
   3162 	else
   3163 		sc->sc_ctrl &= ~CTRL_VME;
   3164 
   3165 	/* Write the control registers. */
   3166 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3167 }
   3168 
   3169 static void
   3170 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3171 {
   3172 	uint32_t gcr;
   3173 	pcireg_t ctrl2;
   3174 
   3175 	gcr = CSR_READ(sc, WMREG_GCR);
   3176 
   3177 	/* Only take action if timeout value is defaulted to 0 */
   3178 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3179 		goto out;
   3180 
   3181 	if ((gcr & GCR_CAP_VER2) == 0) {
   3182 		gcr |= GCR_CMPL_TMOUT_10MS;
   3183 		goto out;
   3184 	}
   3185 
   3186 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3187 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3188 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3189 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3190 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3191 
   3192 out:
   3193 	/* Disable completion timeout resend */
   3194 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3195 
   3196 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3197 }
   3198 
   3199 void
   3200 wm_get_auto_rd_done(struct wm_softc *sc)
   3201 {
   3202 	int i;
   3203 
   3204 	/* wait for eeprom to reload */
   3205 	switch (sc->sc_type) {
   3206 	case WM_T_82571:
   3207 	case WM_T_82572:
   3208 	case WM_T_82573:
   3209 	case WM_T_82574:
   3210 	case WM_T_82583:
   3211 	case WM_T_82575:
   3212 	case WM_T_82576:
   3213 	case WM_T_82580:
   3214 	case WM_T_I350:
   3215 	case WM_T_I354:
   3216 	case WM_T_I210:
   3217 	case WM_T_I211:
   3218 	case WM_T_80003:
   3219 	case WM_T_ICH8:
   3220 	case WM_T_ICH9:
   3221 		for (i = 0; i < 10; i++) {
   3222 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3223 				break;
   3224 			delay(1000);
   3225 		}
   3226 		if (i == 10) {
   3227 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3228 			    "complete\n", device_xname(sc->sc_dev));
   3229 		}
   3230 		break;
   3231 	default:
   3232 		break;
   3233 	}
   3234 }
   3235 
   3236 void
   3237 wm_lan_init_done(struct wm_softc *sc)
   3238 {
   3239 	uint32_t reg = 0;
   3240 	int i;
   3241 
   3242 	/* wait for eeprom to reload */
   3243 	switch (sc->sc_type) {
   3244 	case WM_T_ICH10:
   3245 	case WM_T_PCH:
   3246 	case WM_T_PCH2:
   3247 	case WM_T_PCH_LPT:
   3248 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3249 			reg = CSR_READ(sc, WMREG_STATUS);
   3250 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3251 				break;
   3252 			delay(100);
   3253 		}
   3254 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3255 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3256 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3257 		}
   3258 		break;
   3259 	default:
   3260 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3261 		    __func__);
   3262 		break;
   3263 	}
   3264 
   3265 	reg &= ~STATUS_LAN_INIT_DONE;
   3266 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3267 }
   3268 
   3269 void
   3270 wm_get_cfg_done(struct wm_softc *sc)
   3271 {
   3272 	int mask;
   3273 	uint32_t reg;
   3274 	int i;
   3275 
   3276 	/* wait for eeprom to reload */
   3277 	switch (sc->sc_type) {
   3278 	case WM_T_82542_2_0:
   3279 	case WM_T_82542_2_1:
   3280 		/* null */
   3281 		break;
   3282 	case WM_T_82543:
   3283 	case WM_T_82544:
   3284 	case WM_T_82540:
   3285 	case WM_T_82545:
   3286 	case WM_T_82545_3:
   3287 	case WM_T_82546:
   3288 	case WM_T_82546_3:
   3289 	case WM_T_82541:
   3290 	case WM_T_82541_2:
   3291 	case WM_T_82547:
   3292 	case WM_T_82547_2:
   3293 	case WM_T_82573:
   3294 	case WM_T_82574:
   3295 	case WM_T_82583:
   3296 		/* generic */
   3297 		delay(10*1000);
   3298 		break;
   3299 	case WM_T_80003:
   3300 	case WM_T_82571:
   3301 	case WM_T_82572:
   3302 	case WM_T_82575:
   3303 	case WM_T_82576:
   3304 	case WM_T_82580:
   3305 	case WM_T_I350:
   3306 	case WM_T_I354:
   3307 	case WM_T_I210:
   3308 	case WM_T_I211:
   3309 		if (sc->sc_type == WM_T_82571) {
    3310 			/* The 82571 uses the port-0 CFGDONE bit for all ports */
   3311 			mask = EEMNGCTL_CFGDONE_0;
   3312 		} else
   3313 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3314 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3315 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3316 				break;
   3317 			delay(1000);
   3318 		}
   3319 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3320 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3321 				device_xname(sc->sc_dev), __func__));
   3322 		}
   3323 		break;
   3324 	case WM_T_ICH8:
   3325 	case WM_T_ICH9:
   3326 	case WM_T_ICH10:
   3327 	case WM_T_PCH:
   3328 	case WM_T_PCH2:
   3329 	case WM_T_PCH_LPT:
   3330 		delay(10*1000);
   3331 		if (sc->sc_type >= WM_T_ICH10)
   3332 			wm_lan_init_done(sc);
   3333 		else
   3334 			wm_get_auto_rd_done(sc);
   3335 
   3336 		reg = CSR_READ(sc, WMREG_STATUS);
   3337 		if ((reg & STATUS_PHYRA) != 0)
   3338 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3339 		break;
   3340 	default:
   3341 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3342 		    __func__);
   3343 		break;
   3344 	}
   3345 }
   3346 
   3347 /* Init hardware bits */
   3348 void
   3349 wm_initialize_hardware_bits(struct wm_softc *sc)
   3350 {
   3351 	uint32_t tarc0, tarc1, reg;
   3352 
   3353 	/* For 82571 variant, 80003 and ICHs */
   3354 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3355 	    || (sc->sc_type >= WM_T_80003)) {
   3356 
   3357 		/* Transmit Descriptor Control 0 */
   3358 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3359 		reg |= TXDCTL_COUNT_DESC;
   3360 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3361 
   3362 		/* Transmit Descriptor Control 1 */
   3363 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3364 		reg |= TXDCTL_COUNT_DESC;
   3365 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3366 
   3367 		/* TARC0 */
   3368 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3369 		switch (sc->sc_type) {
   3370 		case WM_T_82571:
   3371 		case WM_T_82572:
   3372 		case WM_T_82573:
   3373 		case WM_T_82574:
   3374 		case WM_T_82583:
   3375 		case WM_T_80003:
   3376 			/* Clear bits 30..27 */
   3377 			tarc0 &= ~__BITS(30, 27);
   3378 			break;
   3379 		default:
   3380 			break;
   3381 		}
   3382 
   3383 		switch (sc->sc_type) {
   3384 		case WM_T_82571:
   3385 		case WM_T_82572:
   3386 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3387 
   3388 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3389 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3390 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3391 			/* 8257[12] Errata No.7 */
    3392 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3393 
   3394 			/* TARC1 bit 28 */
   3395 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3396 				tarc1 &= ~__BIT(28);
   3397 			else
   3398 				tarc1 |= __BIT(28);
   3399 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3400 
   3401 			/*
   3402 			 * 8257[12] Errata No.13
    3403 			 * Disable Dynamic Clock Gating.
   3404 			 */
   3405 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3406 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3407 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3408 			break;
   3409 		case WM_T_82573:
   3410 		case WM_T_82574:
   3411 		case WM_T_82583:
   3412 			if ((sc->sc_type == WM_T_82574)
   3413 			    || (sc->sc_type == WM_T_82583))
   3414 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3415 
   3416 			/* Extended Device Control */
   3417 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3418 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3419 			reg |= __BIT(22);	/* Set bit 22 */
   3420 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3421 
   3422 			/* Device Control */
   3423 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3424 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3425 
   3426 			/* PCIe Control Register */
   3427 			/*
   3428 			 * 82573 Errata (unknown).
   3429 			 *
   3430 			 * 82574 Errata 25 and 82583 Errata 12
   3431 			 * "Dropped Rx Packets":
    3432 			 *   NVM Image Version 2.1.4 and newer do not have this bug.
   3433 			 */
   3434 			reg = CSR_READ(sc, WMREG_GCR);
   3435 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3436 			CSR_WRITE(sc, WMREG_GCR, reg);
   3437 
   3438 			if ((sc->sc_type == WM_T_82574)
   3439 			    || (sc->sc_type == WM_T_82583)) {
   3440 				/*
   3441 				 * Document says this bit must be set for
   3442 				 * proper operation.
   3443 				 */
   3444 				reg = CSR_READ(sc, WMREG_GCR);
   3445 				reg |= __BIT(22);
   3446 				CSR_WRITE(sc, WMREG_GCR, reg);
   3447 
   3448 				/*
    3449 				 * Apply a workaround for the hardware
    3450 				 * erratum documented in the errata sheets:
    3451 				 * some PCIe completions are error-prone or
    3452 				 * unreliable, particularly with ASPM
    3453 				 * enabled. Without the fix, the issue can
    3454 				 * cause Tx timeouts.
   3455 				 */
   3456 				reg = CSR_READ(sc, WMREG_GCR2);
   3457 				reg |= __BIT(0);
   3458 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3459 			}
   3460 			break;
   3461 		case WM_T_80003:
   3462 			/* TARC0 */
   3463 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3464 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3465 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3466 
   3467 			/* TARC1 bit 28 */
   3468 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3469 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3470 				tarc1 &= ~__BIT(28);
   3471 			else
   3472 				tarc1 |= __BIT(28);
   3473 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3474 			break;
   3475 		case WM_T_ICH8:
   3476 		case WM_T_ICH9:
   3477 		case WM_T_ICH10:
   3478 		case WM_T_PCH:
   3479 		case WM_T_PCH2:
   3480 		case WM_T_PCH_LPT:
   3481 			/* TARC 0 */
   3482 			if (sc->sc_type == WM_T_ICH8) {
   3483 				/* Set TARC0 bits 29 and 28 */
   3484 				tarc0 |= __BITS(29, 28);
   3485 			}
   3486 			/* Set TARC0 bits 23,24,26,27 */
   3487 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3488 
   3489 			/* CTRL_EXT */
   3490 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3491 			reg |= __BIT(22);	/* Set bit 22 */
   3492 			/*
   3493 			 * Enable PHY low-power state when MAC is at D3
   3494 			 * w/o WoL
   3495 			 */
   3496 			if (sc->sc_type >= WM_T_PCH)
   3497 				reg |= CTRL_EXT_PHYPDEN;
   3498 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3499 
   3500 			/* TARC1 */
   3501 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3502 			/* bit 28 */
   3503 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3504 				tarc1 &= ~__BIT(28);
   3505 			else
   3506 				tarc1 |= __BIT(28);
   3507 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3508 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3509 
   3510 			/* Device Status */
   3511 			if (sc->sc_type == WM_T_ICH8) {
   3512 				reg = CSR_READ(sc, WMREG_STATUS);
   3513 				reg &= ~__BIT(31);
   3514 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3515 
   3516 			}
   3517 
   3518 			/*
    3519 			 * To work around a descriptor data corruption issue
    3520 			 * seen with NFSv2 UDP traffic, simply disable the NFS
    3521 			 * filtering capability.
   3522 			 */
   3523 			reg = CSR_READ(sc, WMREG_RFCTL);
   3524 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3525 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3526 			break;
   3527 		default:
   3528 			break;
   3529 		}
   3530 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3531 
   3532 		/*
   3533 		 * 8257[12] Errata No.52 and some others.
   3534 		 * Avoid RSS Hash Value bug.
   3535 		 */
   3536 		switch (sc->sc_type) {
   3537 		case WM_T_82571:
   3538 		case WM_T_82572:
   3539 		case WM_T_82573:
   3540 		case WM_T_80003:
   3541 		case WM_T_ICH8:
   3542 			reg = CSR_READ(sc, WMREG_RFCTL);
    3543 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3544 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3545 			break;
   3546 		default:
   3547 			break;
   3548 		}
   3549 	}
   3550 }
   3551 
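         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Translate the raw RXPBS size field into a packet buffer size
          *	using the 82580 lookup table; out-of-range values map to 0.
          */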
   3552 static uint32_t
   3553 wm_rxpbs_adjust_82580(uint32_t val)
   3554 {
   3555 	uint32_t rv = 0;
   3556 
   3557 	if (val < __arraycount(wm_82580_rxpbs_table))
   3558 		rv = wm_82580_rxpbs_table[val];
   3559 
   3560 	return rv;
   3561 }
   3562 
   3563 /*
   3564  * wm_reset:
   3565  *
   3566  *	Reset the i82542 chip.
   3567  */
   3568 static void
   3569 wm_reset(struct wm_softc *sc)
   3570 {
   3571 	int phy_reset = 0;
   3572 	int i, error = 0;
   3573 	uint32_t reg, mask;
   3574 
   3575 	/*
   3576 	 * Allocate on-chip memory according to the MTU size.
   3577 	 * The Packet Buffer Allocation register must be written
   3578 	 * before the chip is reset.
   3579 	 */
   3580 	switch (sc->sc_type) {
   3581 	case WM_T_82547:
   3582 	case WM_T_82547_2:
   3583 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3584 		    PBA_22K : PBA_30K;
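         		/*
         		 * The Tx FIFO occupies the on-chip packet memory above
         		 * the Rx allocation: it starts at sc_pba and extends to
         		 * the end of the 40KB packet buffer.
         		 */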
   3585 		for (i = 0; i < sc->sc_ntxqueues; i++) {
   3586 			struct wm_txqueue *txq = &sc->sc_txq[i];
   3587 			txq->txq_fifo_head = 0;
   3588 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3589 			txq->txq_fifo_size =
   3590 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3591 			txq->txq_fifo_stall = 0;
   3592 		}
   3593 		break;
   3594 	case WM_T_82571:
   3595 	case WM_T_82572:
    3596 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3597 	case WM_T_80003:
   3598 		sc->sc_pba = PBA_32K;
   3599 		break;
   3600 	case WM_T_82573:
   3601 		sc->sc_pba = PBA_12K;
   3602 		break;
   3603 	case WM_T_82574:
   3604 	case WM_T_82583:
   3605 		sc->sc_pba = PBA_20K;
   3606 		break;
   3607 	case WM_T_82576:
   3608 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3609 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3610 		break;
   3611 	case WM_T_82580:
   3612 	case WM_T_I350:
   3613 	case WM_T_I354:
   3614 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3615 		break;
   3616 	case WM_T_I210:
   3617 	case WM_T_I211:
   3618 		sc->sc_pba = PBA_34K;
   3619 		break;
   3620 	case WM_T_ICH8:
   3621 		/* Workaround for a bit corruption issue in FIFO memory */
   3622 		sc->sc_pba = PBA_8K;
   3623 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3624 		break;
   3625 	case WM_T_ICH9:
   3626 	case WM_T_ICH10:
   3627 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3628 		    PBA_14K : PBA_10K;
   3629 		break;
   3630 	case WM_T_PCH:
   3631 	case WM_T_PCH2:
   3632 	case WM_T_PCH_LPT:
   3633 		sc->sc_pba = PBA_26K;
   3634 		break;
   3635 	default:
   3636 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3637 		    PBA_40K : PBA_48K;
   3638 		break;
   3639 	}
   3640 	/*
   3641 	 * Only old or non-multiqueue devices have the PBA register
   3642 	 * XXX Need special handling for 82575.
   3643 	 */
   3644 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3645 	    || (sc->sc_type == WM_T_82575))
   3646 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3647 
   3648 	/* Prevent the PCI-E bus from sticking */
   3649 	if (sc->sc_flags & WM_F_PCIE) {
   3650 		int timeout = 800;
   3651 
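         		/*
         		 * Setting GIO_M_DIS blocks new master requests; poll up
         		 * to 80ms (800 * 100us) for outstanding requests to
         		 * drain before resetting.
         		 */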
   3652 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3653 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3654 
   3655 		while (timeout--) {
   3656 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3657 			    == 0)
   3658 				break;
   3659 			delay(100);
   3660 		}
   3661 	}
   3662 
    3663 	/* Set the completion timeout for the interface */
   3664 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3665 	    || (sc->sc_type == WM_T_82580)
   3666 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3667 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3668 		wm_set_pcie_completion_timeout(sc);
   3669 
   3670 	/* Clear interrupt */
   3671 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3672 	if (sc->sc_nintrs > 1) {
   3673 		if (sc->sc_type != WM_T_82574) {
   3674 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3675 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3676 		} else {
   3677 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3678 		}
   3679 	}
   3680 
   3681 	/* Stop the transmit and receive processes. */
   3682 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3683 	sc->sc_rctl &= ~RCTL_EN;
   3684 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3685 	CSR_WRITE_FLUSH(sc);
   3686 
   3687 	/* XXX set_tbi_sbp_82543() */
   3688 
   3689 	delay(10*1000);
   3690 
   3691 	/* Must acquire the MDIO ownership before MAC reset */
   3692 	switch (sc->sc_type) {
   3693 	case WM_T_82573:
   3694 	case WM_T_82574:
   3695 	case WM_T_82583:
   3696 		error = wm_get_hw_semaphore_82573(sc);
   3697 		break;
   3698 	default:
   3699 		break;
   3700 	}
   3701 
   3702 	/*
   3703 	 * 82541 Errata 29? & 82547 Errata 28?
   3704 	 * See also the description about PHY_RST bit in CTRL register
   3705 	 * in 8254x_GBe_SDM.pdf.
   3706 	 */
   3707 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3708 		CSR_WRITE(sc, WMREG_CTRL,
   3709 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3710 		CSR_WRITE_FLUSH(sc);
   3711 		delay(5000);
   3712 	}
   3713 
   3714 	switch (sc->sc_type) {
   3715 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3716 	case WM_T_82541:
   3717 	case WM_T_82541_2:
   3718 	case WM_T_82547:
   3719 	case WM_T_82547_2:
   3720 		/*
   3721 		 * On some chipsets, a reset through a memory-mapped write
   3722 		 * cycle can cause the chip to reset before completing the
   3723 		 * write cycle.  This causes major headache that can be
   3724 		 * avoided by issuing the reset via indirect register writes
   3725 		 * through I/O space.
   3726 		 *
   3727 		 * So, if we successfully mapped the I/O BAR at attach time,
   3728 		 * use that.  Otherwise, try our luck with a memory-mapped
   3729 		 * reset.
   3730 		 */
   3731 		if (sc->sc_flags & WM_F_IOH_VALID)
   3732 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3733 		else
   3734 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3735 		break;
   3736 	case WM_T_82545_3:
   3737 	case WM_T_82546_3:
   3738 		/* Use the shadow control register on these chips. */
   3739 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3740 		break;
   3741 	case WM_T_80003:
   3742 		mask = swfwphysem[sc->sc_funcid];
   3743 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3744 		wm_get_swfw_semaphore(sc, mask);
   3745 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3746 		wm_put_swfw_semaphore(sc, mask);
   3747 		break;
   3748 	case WM_T_ICH8:
   3749 	case WM_T_ICH9:
   3750 	case WM_T_ICH10:
   3751 	case WM_T_PCH:
   3752 	case WM_T_PCH2:
   3753 	case WM_T_PCH_LPT:
   3754 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3755 		if (wm_phy_resetisblocked(sc) == false) {
   3756 			/*
   3757 			 * Gate automatic PHY configuration by hardware on
   3758 			 * non-managed 82579
   3759 			 */
   3760 			if ((sc->sc_type == WM_T_PCH2)
   3761 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3762 				== 0))
   3763 				wm_gate_hw_phy_config_ich8lan(sc, 1);
   3764 
   3765 			reg |= CTRL_PHY_RESET;
   3766 			phy_reset = 1;
   3767 		}
   3768 		wm_get_swfwhw_semaphore(sc);
   3769 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3770 		/* Don't insert a completion barrier while resetting; just delay */
   3771 		delay(20*1000);
   3772 		wm_put_swfwhw_semaphore(sc);
   3773 		break;
   3774 	case WM_T_82580:
   3775 	case WM_T_I350:
   3776 	case WM_T_I354:
   3777 	case WM_T_I210:
   3778 	case WM_T_I211:
   3779 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3780 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3781 			CSR_WRITE_FLUSH(sc);
   3782 		delay(5000);
   3783 		break;
   3784 	case WM_T_82542_2_0:
   3785 	case WM_T_82542_2_1:
   3786 	case WM_T_82543:
   3787 	case WM_T_82540:
   3788 	case WM_T_82545:
   3789 	case WM_T_82546:
   3790 	case WM_T_82571:
   3791 	case WM_T_82572:
   3792 	case WM_T_82573:
   3793 	case WM_T_82574:
   3794 	case WM_T_82575:
   3795 	case WM_T_82576:
   3796 	case WM_T_82583:
   3797 	default:
   3798 		/* Everything else can safely use the documented method. */
   3799 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3800 		break;
   3801 	}
   3802 
   3803 	/* Must release the MDIO ownership after MAC reset */
   3804 	switch (sc->sc_type) {
   3805 	case WM_T_82573:
   3806 	case WM_T_82574:
   3807 	case WM_T_82583:
   3808 		if (error == 0)
   3809 			wm_put_hw_semaphore_82573(sc);
   3810 		break;
   3811 	default:
   3812 		break;
   3813 	}
   3814 
   3815 	if (phy_reset != 0)
   3816 		wm_get_cfg_done(sc);
   3817 
   3818 	/* reload EEPROM */
   3819 	switch (sc->sc_type) {
   3820 	case WM_T_82542_2_0:
   3821 	case WM_T_82542_2_1:
   3822 	case WM_T_82543:
   3823 	case WM_T_82544:
   3824 		delay(10);
   3825 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3826 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3827 		CSR_WRITE_FLUSH(sc);
   3828 		delay(2000);
   3829 		break;
   3830 	case WM_T_82540:
   3831 	case WM_T_82545:
   3832 	case WM_T_82545_3:
   3833 	case WM_T_82546:
   3834 	case WM_T_82546_3:
   3835 		delay(5*1000);
   3836 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3837 		break;
   3838 	case WM_T_82541:
   3839 	case WM_T_82541_2:
   3840 	case WM_T_82547:
   3841 	case WM_T_82547_2:
   3842 		delay(20000);
   3843 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3844 		break;
   3845 	case WM_T_82571:
   3846 	case WM_T_82572:
   3847 	case WM_T_82573:
   3848 	case WM_T_82574:
   3849 	case WM_T_82583:
   3850 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3851 			delay(10);
   3852 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3853 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3854 			CSR_WRITE_FLUSH(sc);
   3855 		}
   3856 		/* check EECD_EE_AUTORD */
   3857 		wm_get_auto_rd_done(sc);
   3858 		/*
   3859 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   3860 		 * is set.
   3861 		 */
   3862 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3863 		    || (sc->sc_type == WM_T_82583))
   3864 			delay(25*1000);
   3865 		break;
   3866 	case WM_T_82575:
   3867 	case WM_T_82576:
   3868 	case WM_T_82580:
   3869 	case WM_T_I350:
   3870 	case WM_T_I354:
   3871 	case WM_T_I210:
   3872 	case WM_T_I211:
   3873 	case WM_T_80003:
   3874 		/* check EECD_EE_AUTORD */
   3875 		wm_get_auto_rd_done(sc);
   3876 		break;
   3877 	case WM_T_ICH8:
   3878 	case WM_T_ICH9:
   3879 	case WM_T_ICH10:
   3880 	case WM_T_PCH:
   3881 	case WM_T_PCH2:
   3882 	case WM_T_PCH_LPT:
   3883 		break;
   3884 	default:
   3885 		panic("%s: unknown type\n", __func__);
   3886 	}
   3887 
   3888 	/* Check whether EEPROM is present or not */
   3889 	switch (sc->sc_type) {
   3890 	case WM_T_82575:
   3891 	case WM_T_82576:
   3892 	case WM_T_82580:
   3893 	case WM_T_I350:
   3894 	case WM_T_I354:
   3895 	case WM_T_ICH8:
   3896 	case WM_T_ICH9:
   3897 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3898 			/* Not found */
   3899 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3900 			if (sc->sc_type == WM_T_82575)
   3901 				wm_reset_init_script_82575(sc);
   3902 		}
   3903 		break;
   3904 	default:
   3905 		break;
   3906 	}
   3907 
   3908 	if ((sc->sc_type == WM_T_82580)
   3909 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3910 		/* clear global device reset status bit */
   3911 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3912 	}
   3913 
   3914 	/* Clear any pending interrupt events. */
   3915 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3916 	reg = CSR_READ(sc, WMREG_ICR);
   3917 	if (sc->sc_nintrs > 1) {
   3918 		if (sc->sc_type != WM_T_82574) {
   3919 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3920 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3921 		} else
   3922 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3923 	}
   3924 
   3925 	/* reload sc_ctrl */
   3926 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3927 
   3928 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   3929 		wm_set_eee_i350(sc);
   3930 
   3931 	/* dummy read from WUC */
   3932 	if (sc->sc_type == WM_T_PCH)
   3933 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3934 	/*
   3935 	 * For PCH, this write will make sure that any noise will be detected
   3936 	 * as a CRC error and be dropped rather than show up as a bad packet
   3937 	 * to the DMA engine
   3938 	 */
   3939 	if (sc->sc_type == WM_T_PCH)
   3940 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3941 
   3942 	if (sc->sc_type >= WM_T_82544)
   3943 		CSR_WRITE(sc, WMREG_WUC, 0);
   3944 
   3945 	wm_reset_mdicnfg_82580(sc);
   3946 
   3947 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   3948 		wm_pll_workaround_i210(sc);
   3949 }
   3950 
   3951 /*
   3952  * wm_add_rxbuf:
   3953  *
    3954  *	Add a receive buffer to the indicated descriptor.
   3955  */
   3956 static int
   3957 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   3958 {
   3959 	struct wm_softc *sc = rxq->rxq_sc;
   3960 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   3961 	struct mbuf *m;
   3962 	int error;
   3963 
   3964 	KASSERT(WM_RX_LOCKED(rxq));
   3965 
   3966 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3967 	if (m == NULL)
   3968 		return ENOBUFS;
   3969 
   3970 	MCLGET(m, M_DONTWAIT);
   3971 	if ((m->m_flags & M_EXT) == 0) {
   3972 		m_freem(m);
   3973 		return ENOBUFS;
   3974 	}
   3975 
   3976 	if (rxs->rxs_mbuf != NULL)
   3977 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3978 
   3979 	rxs->rxs_mbuf = m;
   3980 
   3981 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3982 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   3983 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   3984 	if (error) {
   3985 		/* XXX XXX XXX */
   3986 		aprint_error_dev(sc->sc_dev,
   3987 		    "unable to load rx DMA map %d, error = %d\n",
   3988 		    idx, error);
   3989 		panic("wm_add_rxbuf");
   3990 	}
   3991 
   3992 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   3993 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   3994 
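         	/*
         	 * On newer-queue (82575 and later) devices, receive
         	 * descriptors are apparently not safe to write until the
         	 * receiver is enabled, so wm_init_rxdesc() is deferred
         	 * until RCTL_EN has been set; older devices initialize the
         	 * descriptor here unconditionally.
         	 */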
   3995 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3996 		if ((sc->sc_rctl & RCTL_EN) != 0)
   3997 			wm_init_rxdesc(rxq, idx);
   3998 	} else
   3999 		wm_init_rxdesc(rxq, idx);
   4000 
   4001 	return 0;
   4002 }
   4003 
   4004 /*
   4005  * wm_rxdrain:
   4006  *
   4007  *	Drain the receive queue.
   4008  */
   4009 static void
   4010 wm_rxdrain(struct wm_rxqueue *rxq)
   4011 {
   4012 	struct wm_softc *sc = rxq->rxq_sc;
   4013 	struct wm_rxsoft *rxs;
   4014 	int i;
   4015 
   4016 	KASSERT(WM_RX_LOCKED(rxq));
   4017 
   4018 	for (i = 0; i < WM_NRXDESC; i++) {
   4019 		rxs = &rxq->rxq_soft[i];
   4020 		if (rxs->rxs_mbuf != NULL) {
   4021 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4022 			m_freem(rxs->rxs_mbuf);
   4023 			rxs->rxs_mbuf = NULL;
   4024 		}
   4025 	}
   4026 }
   4027 
   4028 
   4029 /*
   4030  * XXX copy from FreeBSD's sys/net/rss_config.c
   4031  */
   4032 /*
   4033  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4034  * effectiveness may be limited by algorithm choice and available entropy
   4035  * during the boot.
   4036  *
   4037  * XXXRW: And that we don't randomize it yet!
   4038  *
   4039  * This is the default Microsoft RSS specification key which is also
   4040  * the Chelsio T5 firmware default key.
   4041  */
   4042 #define RSS_KEYSIZE 40
   4043 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4044 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4045 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4046 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4047 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4048 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4049 };
   4050 
   4051 /*
    4052  * The caller must pass an array of size sizeof(wm_rss_key).
    4053  *
    4054  * XXX
    4055  * As if_ixgbe may use this function, it should not be an
    4056  * if_wm-specific function.
   4057  */
   4058 static void
   4059 wm_rss_getkey(uint8_t *key)
   4060 {
   4061 
   4062 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4063 }
   4064 
   4065 /*
    4066  * Set up registers for RSS.
    4067  *
    4068  * XXX VMDq is not supported yet.
   4069  */
   4070 static void
   4071 wm_init_rss(struct wm_softc *sc)
   4072 {
   4073 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4074 	int i;
   4075 
   4076 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4077 
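         	/*
         	 * Fill the RSS redirection table (RETA) by assigning the
         	 * receive queues round-robin; the position of the queue
         	 * index field within each entry varies by chip generation.
         	 */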
   4078 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4079 		int qid, reta_ent;
   4080 
   4081 		qid  = i % sc->sc_nrxqueues;
    4082 		switch (sc->sc_type) {
   4083 		case WM_T_82574:
   4084 			reta_ent = __SHIFTIN(qid,
   4085 			    RETA_ENT_QINDEX_MASK_82574);
   4086 			break;
   4087 		case WM_T_82575:
   4088 			reta_ent = __SHIFTIN(qid,
   4089 			    RETA_ENT_QINDEX1_MASK_82575);
   4090 			break;
   4091 		default:
   4092 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4093 			break;
   4094 		}
   4095 
   4096 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4097 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4098 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4099 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4100 	}
   4101 
   4102 	wm_rss_getkey((uint8_t *)rss_key);
   4103 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4104 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4105 
   4106 	if (sc->sc_type == WM_T_82574)
   4107 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4108 	else
   4109 		mrqc = MRQC_ENABLE_RSS_MQ;
   4110 
   4111 	/* XXXX
    4112 	 * The same as FreeBSD's igb.
    4113 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4114 	 */
   4115 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4116 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4117 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4118 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4119 
   4120 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4121 }
   4122 
   4123 /*
    4124  * Adjust the numbers of TX and RX queues the system actually uses.
    4125  *
    4126  * The numbers are affected by the parameters below:
    4127  *     - The number of hardware queues
   4128  *     - The number of MSI-X vectors (= "nvectors" argument)
   4129  *     - ncpu
   4130  */
   4131 static void
   4132 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4133 {
   4134 	int hw_ntxqueues, hw_nrxqueues;
   4135 
   4136 	if (nvectors < 3) {
   4137 		sc->sc_ntxqueues = 1;
   4138 		sc->sc_nrxqueues = 1;
   4139 		return;
   4140 	}
   4141 
    4142 	switch (sc->sc_type) {
   4143 	case WM_T_82572:
   4144 		hw_ntxqueues = 2;
   4145 		hw_nrxqueues = 2;
   4146 		break;
   4147 	case WM_T_82574:
   4148 		hw_ntxqueues = 2;
   4149 		hw_nrxqueues = 2;
   4150 		break;
   4151 	case WM_T_82575:
   4152 		hw_ntxqueues = 4;
   4153 		hw_nrxqueues = 4;
   4154 		break;
   4155 	case WM_T_82576:
   4156 		hw_ntxqueues = 16;
   4157 		hw_nrxqueues = 16;
   4158 		break;
   4159 	case WM_T_82580:
   4160 	case WM_T_I350:
   4161 	case WM_T_I354:
   4162 		hw_ntxqueues = 8;
   4163 		hw_nrxqueues = 8;
   4164 		break;
   4165 	case WM_T_I210:
   4166 		hw_ntxqueues = 4;
   4167 		hw_nrxqueues = 4;
   4168 		break;
   4169 	case WM_T_I211:
   4170 		hw_ntxqueues = 2;
   4171 		hw_nrxqueues = 2;
   4172 		break;
   4173 		/*
    4174 		 * As the Ethernet controllers below do not support MSI-X,
    4175 		 * this driver does not use multiqueue on them:
   4176 		 *     - WM_T_80003
   4177 		 *     - WM_T_ICH8
   4178 		 *     - WM_T_ICH9
   4179 		 *     - WM_T_ICH10
   4180 		 *     - WM_T_PCH
   4181 		 *     - WM_T_PCH2
   4182 		 *     - WM_T_PCH_LPT
   4183 		 */
   4184 	default:
   4185 		hw_ntxqueues = 1;
   4186 		hw_nrxqueues = 1;
   4187 		break;
   4188 	}
   4189 
   4190 	/*
    4191 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    4192 	 * the number of queues actually used.
    4193 	 *
    4194 	 * XXX
    4195 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
    4196 	 * However, the number of MSI-X vectors on recent controllers (such as
    4197 	 * the I354) expects drivers to bundle a TX queue interrupt and an RX
    4198 	 * queue interrupt into a single vector; FreeBSD's igb handles
    4199 	 * interrupts that way.
   4200 	 */
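         	/*
         	 * One vector is reserved for the link interrupt, hence the
         	 * "nvectors - 1" below; e.g. five vectors yield two TX
         	 * queues and two RX queues.
         	 */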
   4201 	if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
   4202 		sc->sc_ntxqueues = (nvectors - 1) / 2;
   4203 		sc->sc_nrxqueues = (nvectors - 1) / 2;
   4204 	} else {
   4205 		sc->sc_ntxqueues = hw_ntxqueues;
   4206 		sc->sc_nrxqueues = hw_nrxqueues;
   4207 	}
   4208 
   4209 	/*
    4210 	 * As more queues than CPUs cannot improve scaling, we limit
    4211 	 * the number of queues actually used.
   4212 	 */
   4213 	if (ncpu < sc->sc_ntxqueues)
   4214 		sc->sc_ntxqueues = ncpu;
   4215 	if (ncpu < sc->sc_nrxqueues)
   4216 		sc->sc_nrxqueues = ncpu;
   4217 
   4218 	/* XXX Currently, this driver supports RX multiqueue only. */
   4219 	sc->sc_ntxqueues = 1;
   4220 }
   4221 
   4222 /*
   4223  * Both single interrupt MSI and INTx can use this function.
   4224  */
   4225 static int
   4226 wm_setup_legacy(struct wm_softc *sc)
   4227 {
   4228 	pci_chipset_tag_t pc = sc->sc_pc;
   4229 	const char *intrstr = NULL;
   4230 	char intrbuf[PCI_INTRSTR_LEN];
   4231 	int error;
   4232 
   4233 	error = wm_alloc_txrx_queues(sc);
   4234 	if (error) {
   4235 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4236 		    error);
   4237 		return ENOMEM;
   4238 	}
   4239 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4240 	    sizeof(intrbuf));
   4241 #ifdef WM_MPSAFE
   4242 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4243 #endif
   4244 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4245 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4246 	if (sc->sc_ihs[0] == NULL) {
    4247 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4248 		    (pci_intr_type(sc->sc_intrs[0])
   4249 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4250 		return ENOMEM;
   4251 	}
   4252 
   4253 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4254 	sc->sc_nintrs = 1;
   4255 	return 0;
   4256 }
   4257 
   4258 static int
   4259 wm_setup_msix(struct wm_softc *sc)
   4260 {
   4261 	void *vih;
   4262 	kcpuset_t *affinity;
   4263 	int qidx, error, intr_idx, tx_established, rx_established;
   4264 	pci_chipset_tag_t pc = sc->sc_pc;
   4265 	const char *intrstr = NULL;
   4266 	char intrbuf[PCI_INTRSTR_LEN];
   4267 	char intr_xname[INTRDEVNAMEBUF];
   4268 	/*
    4269 	 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4270 	 * interrupts starts from CPU#1.
   4271 	 */
   4272 	int affinity_offset = 1;
   4273 
   4274 	error = wm_alloc_txrx_queues(sc);
   4275 	if (error) {
   4276 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4277 		    error);
   4278 		return ENOMEM;
   4279 	}
   4280 
   4281 	kcpuset_create(&affinity, false);
   4282 	intr_idx = 0;
   4283 
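         	/*
         	 * MSI-X vector layout: one vector per TX queue first, then
         	 * one per RX queue, and finally a single vector for
         	 * link-status interrupts.
         	 */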
   4284 	/*
   4285 	 * TX
   4286 	 */
   4287 	tx_established = 0;
   4288 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4289 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4290 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
   4291 
   4292 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4293 		    sizeof(intrbuf));
   4294 #ifdef WM_MPSAFE
   4295 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4296 		    PCI_INTR_MPSAFE, true);
   4297 #endif
   4298 		memset(intr_xname, 0, sizeof(intr_xname));
   4299 		snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
   4300 		    device_xname(sc->sc_dev), qidx);
   4301 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4302 		    IPL_NET, wm_txintr_msix, txq, intr_xname);
   4303 		if (vih == NULL) {
   4304 			aprint_error_dev(sc->sc_dev,
   4305 			    "unable to establish MSI-X(for TX)%s%s\n",
   4306 			    intrstr ? " at " : "",
   4307 			    intrstr ? intrstr : "");
   4308 
   4309 			goto fail_0;
   4310 		}
   4311 		kcpuset_zero(affinity);
   4312 		/* Round-robin affinity */
   4313 		kcpuset_set(affinity, affinity_to);
   4314 		error = interrupt_distribute(vih, affinity, NULL);
   4315 		if (error == 0) {
   4316 			aprint_normal_dev(sc->sc_dev,
   4317 			    "for TX interrupting at %s affinity to %u\n",
   4318 			    intrstr, affinity_to);
   4319 		} else {
   4320 			aprint_normal_dev(sc->sc_dev,
   4321 			    "for TX interrupting at %s\n", intrstr);
   4322 		}
   4323 		sc->sc_ihs[intr_idx] = vih;
   4324 		txq->txq_id = qidx;
   4325 		txq->txq_intr_idx = intr_idx;
   4326 
   4327 		tx_established++;
   4328 		intr_idx++;
   4329 	}
   4330 
   4331 	/*
   4332 	 * RX
   4333 	 */
   4334 	rx_established = 0;
   4335 	for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4336 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4337 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
   4338 
   4339 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4340 		    sizeof(intrbuf));
   4341 #ifdef WM_MPSAFE
   4342 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4343 		    PCI_INTR_MPSAFE, true);
   4344 #endif
   4345 		memset(intr_xname, 0, sizeof(intr_xname));
   4346 		snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
   4347 		    device_xname(sc->sc_dev), qidx);
   4348 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4349 		    IPL_NET, wm_rxintr_msix, rxq, intr_xname);
   4350 		if (vih == NULL) {
   4351 			aprint_error_dev(sc->sc_dev,
   4352 			    "unable to establish MSI-X(for RX)%s%s\n",
   4353 			    intrstr ? " at " : "",
   4354 			    intrstr ? intrstr : "");
   4355 
   4356 			goto fail_1;
   4357 		}
   4358 		kcpuset_zero(affinity);
   4359 		/* Round-robin affinity */
   4360 		kcpuset_set(affinity, affinity_to);
   4361 		error = interrupt_distribute(vih, affinity, NULL);
   4362 		if (error == 0) {
   4363 			aprint_normal_dev(sc->sc_dev,
   4364 			    "for RX interrupting at %s affinity to %u\n",
   4365 			    intrstr, affinity_to);
   4366 		} else {
   4367 			aprint_normal_dev(sc->sc_dev,
   4368 			    "for RX interrupting at %s\n", intrstr);
   4369 		}
   4370 		sc->sc_ihs[intr_idx] = vih;
   4371 		rxq->rxq_id = qidx;
   4372 		rxq->rxq_intr_idx = intr_idx;
   4373 
   4374 		rx_established++;
   4375 		intr_idx++;
   4376 	}
   4377 
   4378 	/*
   4379 	 * LINK
   4380 	 */
   4381 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4382 	    sizeof(intrbuf));
   4383 #ifdef WM_MPSAFE
   4384 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4385 #endif
   4386 	memset(intr_xname, 0, sizeof(intr_xname));
   4387 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4388 	    device_xname(sc->sc_dev));
   4389 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4390 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4391 	if (vih == NULL) {
   4392 		aprint_error_dev(sc->sc_dev,
   4393 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4394 		    intrstr ? " at " : "",
   4395 		    intrstr ? intrstr : "");
   4396 
   4397 		goto fail_1;
   4398 	}
    4399 	/* Keep the default affinity for the LINK interrupt */
   4400 	aprint_normal_dev(sc->sc_dev,
   4401 	    "for LINK interrupting at %s\n", intrstr);
   4402 	sc->sc_ihs[intr_idx] = vih;
   4403 	sc->sc_link_intr_idx = intr_idx;
   4404 
   4405 	sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   4406 	kcpuset_destroy(affinity);
   4407 	return 0;
   4408 
   4409  fail_1:
   4410 	for (qidx = 0; qidx < rx_established; qidx++) {
   4411 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4412 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[rxq->rxq_intr_idx]);
   4413 		sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
   4414 	}
   4415  fail_0:
   4416 	for (qidx = 0; qidx < tx_established; qidx++) {
   4417 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4418 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[txq->txq_intr_idx]);
   4419 		sc->sc_ihs[txq->txq_intr_idx] = NULL;
   4420 	}
   4421 
   4422 	kcpuset_destroy(affinity);
   4423 	return ENOMEM;
   4424 }
   4425 
   4426 /*
   4427  * wm_init:		[ifnet interface function]
   4428  *
   4429  *	Initialize the interface.
   4430  */
   4431 static int
   4432 wm_init(struct ifnet *ifp)
   4433 {
   4434 	struct wm_softc *sc = ifp->if_softc;
   4435 	int ret;
   4436 
   4437 	WM_CORE_LOCK(sc);
   4438 	ret = wm_init_locked(ifp);
   4439 	WM_CORE_UNLOCK(sc);
   4440 
   4441 	return ret;
   4442 }
   4443 
   4444 static int
   4445 wm_init_locked(struct ifnet *ifp)
   4446 {
   4447 	struct wm_softc *sc = ifp->if_softc;
   4448 	int i, j, trynum, error = 0;
   4449 	uint32_t reg;
   4450 
   4451 	KASSERT(WM_CORE_LOCKED(sc));
   4452 	/*
    4453 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4454 	 * There is a small but measurable benefit to avoiding the adjustment
   4455 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4456 	 * on such platforms.  One possibility is that the DMA itself is
   4457 	 * slightly more efficient if the front of the entire packet (instead
   4458 	 * of the front of the headers) is aligned.
   4459 	 *
   4460 	 * Note we must always set align_tweak to 0 if we are using
   4461 	 * jumbo frames.
   4462 	 */
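         	/*
         	 * The 2-byte tweak offsets the 14-byte Ethernet header so
         	 * that the IP header that follows it lands on a 4-byte
         	 * boundary.
         	 */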
   4463 #ifdef __NO_STRICT_ALIGNMENT
   4464 	sc->sc_align_tweak = 0;
   4465 #else
   4466 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4467 		sc->sc_align_tweak = 0;
   4468 	else
   4469 		sc->sc_align_tweak = 2;
   4470 #endif /* __NO_STRICT_ALIGNMENT */
   4471 
   4472 	/* Cancel any pending I/O. */
   4473 	wm_stop_locked(ifp, 0);
   4474 
   4475 	/* update statistics before reset */
   4476 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4477 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4478 
   4479 	/* Reset the chip to a known state. */
   4480 	wm_reset(sc);
   4481 
   4482 	switch (sc->sc_type) {
   4483 	case WM_T_82571:
   4484 	case WM_T_82572:
   4485 	case WM_T_82573:
   4486 	case WM_T_82574:
   4487 	case WM_T_82583:
   4488 	case WM_T_80003:
   4489 	case WM_T_ICH8:
   4490 	case WM_T_ICH9:
   4491 	case WM_T_ICH10:
   4492 	case WM_T_PCH:
   4493 	case WM_T_PCH2:
   4494 	case WM_T_PCH_LPT:
   4495 		/* AMT based hardware can now take control from firmware */
   4496 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4497 			wm_get_hw_control(sc);
   4498 		break;
   4499 	default:
   4500 		break;
   4501 	}
   4502 
   4503 	/* Init hardware bits */
   4504 	wm_initialize_hardware_bits(sc);
   4505 
   4506 	/* Reset the PHY. */
   4507 	if (sc->sc_flags & WM_F_HAS_MII)
   4508 		wm_gmii_reset(sc);
   4509 
   4510 	/* Calculate (E)ITR value */
   4511 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4512 		sc->sc_itr = 450;	/* For EITR */
   4513 	} else if (sc->sc_type >= WM_T_82543) {
   4514 		/*
   4515 		 * Set up the interrupt throttling register (units of 256ns)
   4516 		 * Note that a footnote in Intel's documentation says this
   4517 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4518 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4519 		 * that that is also true for the 1024ns units of the other
   4520 		 * interrupt-related timer registers -- so, really, we ought
   4521 		 * to divide this value by 4 when the link speed is low.
   4522 		 *
   4523 		 * XXX implement this division at link speed change!
   4524 		 */
   4525 
   4526 		/*
   4527 		 * For N interrupts/sec, set this value to:
   4528 		 * 1000000000 / (N * 256).  Note that we set the
   4529 		 * absolute and packet timer values to this value
   4530 		 * divided by 4 to get "simple timer" behavior.
   4531 		 */
   4532 
   4533 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4534 	}
   4535 
   4536 	error = wm_init_txrx_queues(sc);
   4537 	if (error)
   4538 		goto out;
   4539 
   4540 	/*
   4541 	 * Clear out the VLAN table -- we don't use it (yet).
   4542 	 */
   4543 	CSR_WRITE(sc, WMREG_VET, 0);
   4544 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4545 		trynum = 10; /* Due to hw errata */
   4546 	else
   4547 		trynum = 1;
   4548 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4549 		for (j = 0; j < trynum; j++)
   4550 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4551 
   4552 	/*
   4553 	 * Set up flow-control parameters.
   4554 	 *
   4555 	 * XXX Values could probably stand some tuning.
   4556 	 */
   4557 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4558 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4559 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   4560 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4561 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4562 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4563 	}
   4564 
   4565 	sc->sc_fcrtl = FCRTL_DFLT;
   4566 	if (sc->sc_type < WM_T_82543) {
   4567 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4568 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4569 	} else {
   4570 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4571 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4572 	}
   4573 
   4574 	if (sc->sc_type == WM_T_80003)
   4575 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4576 	else
   4577 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4578 
   4579 	/* Writes the control register. */
   4580 	wm_set_vlan(sc);
   4581 
   4582 	if (sc->sc_flags & WM_F_HAS_MII) {
   4583 		int val;
   4584 
   4585 		switch (sc->sc_type) {
   4586 		case WM_T_80003:
   4587 		case WM_T_ICH8:
   4588 		case WM_T_ICH9:
   4589 		case WM_T_ICH10:
   4590 		case WM_T_PCH:
   4591 		case WM_T_PCH2:
   4592 		case WM_T_PCH_LPT:
   4593 			/*
    4594 			 * Set the MAC to wait the maximum time between
    4595 			 * iterations and to increase the maximum number of
    4596 			 * iterations when polling the PHY; this fixes
    4597 			 * erroneous timeouts at 10Mbps.
   4598 			 */
   4599 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4600 			    0xFFFF);
   4601 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4602 			val |= 0x3F;
   4603 			wm_kmrn_writereg(sc,
   4604 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4605 			break;
   4606 		default:
   4607 			break;
   4608 		}
   4609 
   4610 		if (sc->sc_type == WM_T_80003) {
   4611 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4612 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4613 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4614 
    4615 			/* Bypass the RX and TX FIFOs */
   4616 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4617 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4618 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4619 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4620 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4621 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4622 		}
   4623 	}
   4624 #if 0
   4625 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4626 #endif
   4627 
   4628 	/* Set up checksum offload parameters. */
   4629 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4630 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4631 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4632 		reg |= RXCSUM_IPOFL;
   4633 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4634 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4635 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4636 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4637 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4638 
   4639 	/* Set up MSI-X */
   4640 	if (sc->sc_nintrs > 1) {
   4641 		uint32_t ivar;
   4642 		struct wm_txqueue *txq;
   4643 		struct wm_rxqueue *rxq;
   4644 		int qid;
   4645 
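         		/*
         		 * The 82575 maps queues to MSI-X vectors with simple
         		 * per-vector bitmask (MSIXBM) registers; later chips
         		 * use IVAR table registers instead.
         		 */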
   4646 		if (sc->sc_type == WM_T_82575) {
   4647 			/* Interrupt control */
   4648 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4649 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4650 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4651 
   4652 			/* TX */
   4653 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4654 				txq = &sc->sc_txq[i];
   4655 				CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
   4656 				    EITR_TX_QUEUE(txq->txq_id));
   4657 			}
   4658 			/* RX */
   4659 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4660 				rxq = &sc->sc_rxq[i];
   4661 				CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
   4662 				    EITR_RX_QUEUE(rxq->rxq_id));
   4663 			}
   4664 			/* Link status */
   4665 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4666 			    EITR_OTHER);
   4667 		} else if (sc->sc_type == WM_T_82574) {
   4668 			/* Interrupt control */
   4669 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4670 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4671 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4672 
   4673 			ivar = 0;
   4674 			/* TX */
   4675 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4676 				txq = &sc->sc_txq[i];
   4677 				ivar |= __SHIFTIN((IVAR_VALID_82574
   4678 					| txq->txq_intr_idx),
   4679 				    IVAR_TX_MASK_Q_82574(txq->txq_id));
   4680 			}
   4681 			/* RX */
   4682 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4683 				rxq = &sc->sc_rxq[i];
   4684 				ivar |= __SHIFTIN((IVAR_VALID_82574
   4685 					| rxq->rxq_intr_idx),
   4686 				    IVAR_RX_MASK_Q_82574(rxq->rxq_id));
   4687 			}
   4688 			/* Link status */
   4689 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4690 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4691 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4692 		} else {
   4693 			/* Interrupt control */
   4694 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4695 			    | GPIE_EIAME | GPIE_PBA);
   4696 
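			/*
			 * On these chips each IVAR register packs entries
			 * for more than one queue, so read-modify-write and
			 * touch only the field belonging to this queue.
			 */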
   4697 			switch (sc->sc_type) {
   4698 			case WM_T_82580:
   4699 			case WM_T_I350:
   4700 			case WM_T_I354:
   4701 			case WM_T_I210:
   4702 			case WM_T_I211:
   4703 				/* TX */
   4704 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4705 					txq = &sc->sc_txq[i];
   4706 					qid = txq->txq_id;
   4707 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4708 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4709 					ivar |= __SHIFTIN((txq->txq_intr_idx
   4710 						| IVAR_VALID),
   4711 					    IVAR_TX_MASK_Q(qid));
   4712 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4713 				}
   4714 
   4715 				/* RX */
   4716 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4717 					rxq = &sc->sc_rxq[i];
   4718 					qid = rxq->rxq_id;
   4719 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4720 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4721 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
   4722 						| IVAR_VALID),
   4723 					    IVAR_RX_MASK_Q(qid));
   4724 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4725 				}
   4726 				break;
   4727 			case WM_T_82576:
   4728 				/* TX */
   4729 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4730 					txq = &sc->sc_txq[i];
   4731 					qid = txq->txq_id;
   4732 					ivar = CSR_READ(sc,
   4733 					    WMREG_IVAR_Q_82576(qid));
   4734 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4735 					ivar |= __SHIFTIN((txq->txq_intr_idx
   4736 						| IVAR_VALID),
   4737 					    IVAR_TX_MASK_Q_82576(qid));
   4738 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4739 					    ivar);
   4740 				}
   4741 
   4742 				/* RX */
   4743 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4744 					rxq = &sc->sc_rxq[i];
   4745 					qid = rxq->rxq_id;
   4746 					ivar = CSR_READ(sc,
   4747 					    WMREG_IVAR_Q_82576(qid));
   4748 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4749 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
   4750 						| IVAR_VALID),
   4751 					    IVAR_RX_MASK_Q_82576(qid));
   4752 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4753 					    ivar);
   4754 				}
   4755 				break;
   4756 			default:
   4757 				break;
   4758 			}
   4759 
   4760 			/* Link status */
   4761 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4762 			    IVAR_MISC_OTHER);
   4763 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4764 		}
   4765 
   4766 		if (sc->sc_nrxqueues > 1) {
   4767 			wm_init_rss(sc);
   4768 
   4769 			/*
    4770 			 * NOTE: Receive Full-Packet Checksum Offload is
    4771 			 * mutually exclusive with Multiqueue. However, this
    4772 			 * is not the same as TCP/IP checksums, which still
    4773 			 * work.
   4774 			*/
   4775 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4776 			reg |= RXCSUM_PCSD;
   4777 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4778 		}
   4779 	}
   4780 
   4781 	/* Set up the interrupt registers. */
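	/* Writing all ones to IMC first disables every interrupt source. */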
   4782 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4783 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4784 	    ICR_RXO | ICR_RXT0;
   4785 	if (sc->sc_nintrs > 1) {
   4786 		uint32_t mask;
   4787 		struct wm_txqueue *txq;
   4788 		struct wm_rxqueue *rxq;
   4789 
   4790 		switch (sc->sc_type) {
   4791 		case WM_T_82574:
   4792 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4793 			    WMREG_EIAC_82574_MSIX_MASK);
   4794 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4795 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4796 			break;
   4797 		default:
   4798 			if (sc->sc_type == WM_T_82575) {
   4799 				mask = 0;
   4800 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4801 					txq = &sc->sc_txq[i];
   4802 					mask |= EITR_TX_QUEUE(txq->txq_id);
   4803 				}
   4804 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4805 					rxq = &sc->sc_rxq[i];
   4806 					mask |= EITR_RX_QUEUE(rxq->rxq_id);
   4807 				}
   4808 				mask |= EITR_OTHER;
   4809 			} else {
   4810 				mask = 0;
   4811 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4812 					txq = &sc->sc_txq[i];
   4813 					mask |= 1 << txq->txq_intr_idx;
   4814 				}
   4815 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4816 					rxq = &sc->sc_rxq[i];
   4817 					mask |= 1 << rxq->rxq_intr_idx;
   4818 				}
   4819 				mask |= 1 << sc->sc_link_intr_idx;
   4820 			}
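			/*
			 * EIAC selects the extended causes that auto-clear,
			 * EIAM the ones that auto-mask, and EIMS enables
			 * them; the legacy IMS still gates link-status.
			 */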
   4821 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4822 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4823 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4824 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4825 			break;
   4826 		}
   4827 	} else
   4828 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4829 
   4830 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4831 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4832 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   4833 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4834 		reg |= KABGTXD_BGSQLBIAS;
   4835 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4836 	}
   4837 
   4838 	/* Set up the inter-packet gap. */
   4839 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4840 
   4841 	if (sc->sc_type >= WM_T_82543) {
   4842 		/*
    4843 		 * XXX The 82574 has both ITR and EITR. Set EITR when we
    4844 		 * use the multiqueue function with MSI-X.
   4845 		 */
   4846 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4847 			int qidx;
   4848 			for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4849 				struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4850 				CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
   4851 				    sc->sc_itr);
   4852 			}
   4853 			for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4854 				struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4855 				CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
   4856 				    sc->sc_itr);
   4857 			}
   4858 			/*
    4859 			 * Link interrupts occur much less often than TX
    4860 			 * and RX interrupts, so we don't tune the
    4861 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    4862 			 * FreeBSD's if_igb does.
   4863 			 */
   4864 		} else
   4865 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4866 	}
   4867 
   4868 	/* Set the VLAN ethernetype. */
   4869 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4870 
   4871 	/*
   4872 	 * Set up the transmit control register; we start out with
    4873 	 * a collision distance suitable for FDX, but update it when
   4874 	 * we resolve the media type.
   4875 	 */
   4876 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4877 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4878 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4879 	if (sc->sc_type >= WM_T_82571)
   4880 		sc->sc_tctl |= TCTL_MULR;
   4881 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4882 
   4883 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    4884 		/* Write TDT after TCTL.EN is set. See the documentation. */
   4885 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4886 	}
   4887 
   4888 	if (sc->sc_type == WM_T_80003) {
   4889 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4890 		reg &= ~TCTL_EXT_GCEX_MASK;
   4891 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4892 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4893 	}
   4894 
   4895 	/* Set the media. */
   4896 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4897 		goto out;
   4898 
   4899 	/* Configure for OS presence */
   4900 	wm_init_manageability(sc);
   4901 
   4902 	/*
   4903 	 * Set up the receive control register; we actually program
   4904 	 * the register when we set the receive filter.  Use multicast
   4905 	 * address offset type 0.
   4906 	 *
   4907 	 * Only the i82544 has the ability to strip the incoming
   4908 	 * CRC, so we don't enable that feature.
   4909 	 */
   4910 	sc->sc_mchash_type = 0;
   4911 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4912 	    | RCTL_MO(sc->sc_mchash_type);
   4913 
   4914 	/*
   4915 	 * The I350 has a bug where it always strips the CRC whether
    4916 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   4917 	 */
   4918 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4919 	    || (sc->sc_type == WM_T_I210))
   4920 		sc->sc_rctl |= RCTL_SECRC;
   4921 
   4922 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4923 	    && (ifp->if_mtu > ETHERMTU)) {
   4924 		sc->sc_rctl |= RCTL_LPE;
   4925 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4926 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4927 	}
   4928 
   4929 	if (MCLBYTES == 2048) {
   4930 		sc->sc_rctl |= RCTL_2k;
   4931 	} else {
   4932 		if (sc->sc_type >= WM_T_82543) {
   4933 			switch (MCLBYTES) {
   4934 			case 4096:
   4935 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4936 				break;
   4937 			case 8192:
   4938 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4939 				break;
   4940 			case 16384:
   4941 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4942 				break;
   4943 			default:
   4944 				panic("wm_init: MCLBYTES %d unsupported",
   4945 				    MCLBYTES);
   4946 				break;
   4947 			}
    4948 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
   4949 	}
   4950 
   4951 	/* Set the receive filter. */
   4952 	wm_set_filter(sc);
   4953 
   4954 	/* Enable ECC */
   4955 	switch (sc->sc_type) {
   4956 	case WM_T_82571:
   4957 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4958 		reg |= PBA_ECC_CORR_EN;
   4959 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4960 		break;
   4961 	case WM_T_PCH_LPT:
   4962 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4963 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4964 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4965 
   4966 		reg = CSR_READ(sc, WMREG_CTRL);
   4967 		reg |= CTRL_MEHE;
   4968 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4969 		break;
   4970 	default:
   4971 		break;
   4972 	}
   4973 
    4974 	/* On 82575 and later, set RDT only if RX is enabled */
   4975 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4976 		int qidx;
   4977 		for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4978 			struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4979 			for (i = 0; i < WM_NRXDESC; i++) {
   4980 				WM_RX_LOCK(rxq);
   4981 				wm_init_rxdesc(rxq, i);
   4982 				WM_RX_UNLOCK(rxq);
    4984 			}
   4985 		}
   4986 	}
   4987 
   4988 	sc->sc_stopping = false;
   4989 
   4990 	/* Start the one second link check clock. */
   4991 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4992 
   4993 	/* ...all done! */
   4994 	ifp->if_flags |= IFF_RUNNING;
   4995 	ifp->if_flags &= ~IFF_OACTIVE;
   4996 
   4997  out:
   4998 	sc->sc_if_flags = ifp->if_flags;
   4999 	if (error)
   5000 		log(LOG_ERR, "%s: interface not running\n",
   5001 		    device_xname(sc->sc_dev));
   5002 	return error;
   5003 }
   5004 
   5005 /*
   5006  * wm_stop:		[ifnet interface function]
   5007  *
   5008  *	Stop transmission on the interface.
   5009  */
   5010 static void
   5011 wm_stop(struct ifnet *ifp, int disable)
   5012 {
   5013 	struct wm_softc *sc = ifp->if_softc;
   5014 
   5015 	WM_CORE_LOCK(sc);
   5016 	wm_stop_locked(ifp, disable);
   5017 	WM_CORE_UNLOCK(sc);
   5018 }
   5019 
   5020 static void
   5021 wm_stop_locked(struct ifnet *ifp, int disable)
   5022 {
   5023 	struct wm_softc *sc = ifp->if_softc;
   5024 	struct wm_txsoft *txs;
   5025 	int i, qidx;
   5026 
   5027 	KASSERT(WM_CORE_LOCKED(sc));
   5028 
   5029 	sc->sc_stopping = true;
   5030 
   5031 	/* Stop the one second clock. */
   5032 	callout_stop(&sc->sc_tick_ch);
   5033 
   5034 	/* Stop the 82547 Tx FIFO stall check timer. */
   5035 	if (sc->sc_type == WM_T_82547)
   5036 		callout_stop(&sc->sc_txfifo_ch);
   5037 
   5038 	if (sc->sc_flags & WM_F_HAS_MII) {
   5039 		/* Down the MII. */
   5040 		mii_down(&sc->sc_mii);
   5041 	} else {
   5042 #if 0
    5043 		/* Should we clear the PHY's status properly? */
   5044 		wm_reset(sc);
   5045 #endif
   5046 	}
   5047 
   5048 	/* Stop the transmit and receive processes. */
   5049 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5050 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5051 	sc->sc_rctl &= ~RCTL_EN;
   5052 
   5053 	/*
   5054 	 * Clear the interrupt mask to ensure the device cannot assert its
   5055 	 * interrupt line.
   5056 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5057 	 * service any currently pending or shared interrupt.
   5058 	 */
   5059 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5060 	sc->sc_icr = 0;
   5061 	if (sc->sc_nintrs > 1) {
   5062 		if (sc->sc_type != WM_T_82574) {
   5063 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5064 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5065 		} else
   5066 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5067 	}
   5068 
   5069 	/* Release any queued transmit buffers. */
   5070 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   5071 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   5072 		WM_TX_LOCK(txq);
   5073 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5074 			txs = &txq->txq_soft[i];
   5075 			if (txs->txs_mbuf != NULL) {
    5076 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5077 				m_freem(txs->txs_mbuf);
   5078 				txs->txs_mbuf = NULL;
   5079 			}
   5080 		}
   5081 		WM_TX_UNLOCK(txq);
   5082 	}
   5083 
   5084 	/* Mark the interface as down and cancel the watchdog timer. */
   5085 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5086 	ifp->if_timer = 0;
   5087 
   5088 	if (disable) {
   5089 		for (i = 0; i < sc->sc_nrxqueues; i++) {
   5090 			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5091 			WM_RX_LOCK(rxq);
   5092 			wm_rxdrain(rxq);
   5093 			WM_RX_UNLOCK(rxq);
   5094 		}
   5095 	}
   5096 
   5097 #if 0 /* notyet */
   5098 	if (sc->sc_type >= WM_T_82544)
   5099 		CSR_WRITE(sc, WMREG_WUC, 0);
   5100 #endif
   5101 }
   5102 
   5103 static void
   5104 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5105 {
   5106 	struct mbuf *m;
   5107 	int i;
   5108 
   5109 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5110 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5111 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5112 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5113 		    m->m_data, m->m_len, m->m_flags);
   5114 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5115 	    i, i == 1 ? "" : "s");
   5116 }
   5117 
   5118 /*
   5119  * wm_82547_txfifo_stall:
   5120  *
   5121  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5122  *	reset the FIFO pointers, and restart packet transmission.
   5123  */
   5124 static void
   5125 wm_82547_txfifo_stall(void *arg)
   5126 {
   5127 	struct wm_softc *sc = arg;
    5128 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5129 #ifndef WM_MPSAFE
   5130 	int s;
   5131 
   5132 	s = splnet();
   5133 #endif
   5134 	WM_TX_LOCK(txq);
   5135 
   5136 	if (sc->sc_stopping)
   5137 		goto out;
   5138 
   5139 	if (txq->txq_fifo_stall) {
   5140 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5141 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5142 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5143 			/*
   5144 			 * Packets have drained.  Stop transmitter, reset
   5145 			 * FIFO pointers, restart transmitter, and kick
   5146 			 * the packet queue.
   5147 			 */
   5148 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5149 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5150 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5151 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5152 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5153 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5154 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5155 			CSR_WRITE_FLUSH(sc);
   5156 
   5157 			txq->txq_fifo_head = 0;
   5158 			txq->txq_fifo_stall = 0;
   5159 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5160 		} else {
   5161 			/*
   5162 			 * Still waiting for packets to drain; try again in
   5163 			 * another tick.
   5164 			 */
   5165 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5166 		}
   5167 	}
   5168 
   5169 out:
   5170 	WM_TX_UNLOCK(txq);
   5171 #ifndef WM_MPSAFE
   5172 	splx(s);
   5173 #endif
   5174 }
   5175 
   5176 /*
   5177  * wm_82547_txfifo_bugchk:
   5178  *
   5179  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5180  *	prevent enqueueing a packet that would wrap around the end
    5181  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5182  *
   5183  *	We do this by checking the amount of space before the end
   5184  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5185  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5186  *	the internal FIFO pointers to the beginning, and restart
   5187  *	transmission on the interface.
   5188  */
   5189 #define	WM_FIFO_HDR		0x10
   5190 #define	WM_82547_PAD_LEN	0x3e0
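/*
 * A note on the constants, mirroring the equivalent Linux e1000
 * workaround: each packet occupies its length plus a 16-byte FIFO
 * header, rounded up to a WM_FIFO_HDR boundary, and WM_82547_PAD_LEN
 * is the slack required before a packet is allowed to wrap the FIFO.
 */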
   5191 static int
   5192 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5193 {
   5194 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5195 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5196 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5197 
   5198 	/* Just return if already stalled. */
   5199 	if (txq->txq_fifo_stall)
   5200 		return 1;
   5201 
   5202 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5203 		/* Stall only occurs in half-duplex mode. */
   5204 		goto send_packet;
   5205 	}
   5206 
   5207 	if (len >= WM_82547_PAD_LEN + space) {
   5208 		txq->txq_fifo_stall = 1;
   5209 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5210 		return 1;
   5211 	}
   5212 
   5213  send_packet:
   5214 	txq->txq_fifo_head += len;
   5215 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5216 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5217 
   5218 	return 0;
   5219 }
   5220 
   5221 static int
   5222 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5223 {
   5224 	int error;
   5225 
   5226 	/*
   5227 	 * Allocate the control data structures, and create and load the
   5228 	 * DMA map for it.
   5229 	 *
   5230 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5231 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5232 	 * both sets within the same 4G segment.
   5233 	 */
   5234 	if (sc->sc_type < WM_T_82544) {
   5235 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
    5236 		txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
   5237 	} else {
   5238 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5239 		txq->txq_desc_size = sizeof(txdescs_t);
   5240 	}
   5241 
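	/*
	 * PAGE_SIZE below is the alignment argument and 0x100000000ULL
	 * the boundary argument to bus_dmamem_alloc(); the boundary
	 * keeps the allocation from crossing a 4GB line, which enforces
	 * the same-4G-segment requirement noted above.
	 */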
   5242 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size,
   5243 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5244 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5245 		aprint_error_dev(sc->sc_dev,
   5246 		    "unable to allocate TX control data, error = %d\n",
   5247 		    error);
   5248 		goto fail_0;
   5249 	}
   5250 
   5251 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5252 		    txq->txq_desc_rseg, txq->txq_desc_size,
   5253 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5254 		aprint_error_dev(sc->sc_dev,
   5255 		    "unable to map TX control data, error = %d\n", error);
   5256 		goto fail_1;
   5257 	}
   5258 
   5259 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
   5260 		    txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5261 		aprint_error_dev(sc->sc_dev,
   5262 		    "unable to create TX control data DMA map, error = %d\n",
   5263 		    error);
   5264 		goto fail_2;
   5265 	}
   5266 
   5267 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5268 		    txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
   5269 		aprint_error_dev(sc->sc_dev,
   5270 		    "unable to load TX control data DMA map, error = %d\n",
   5271 		    error);
   5272 		goto fail_3;
   5273 	}
   5274 
   5275 	return 0;
   5276 
   5277  fail_3:
   5278 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5279  fail_2:
   5280 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5281 	    txq->txq_desc_size);
   5282  fail_1:
   5283 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5284  fail_0:
   5285 	return error;
   5286 }
   5287 
   5288 static void
   5289 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5290 {
   5291 
   5292 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5293 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5294 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5295 	    txq->txq_desc_size);
   5296 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5297 }
   5298 
   5299 static int
   5300 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5301 {
   5302 	int error;
   5303 
   5304 	/*
   5305 	 * Allocate the control data structures, and create and load the
   5306 	 * DMA map for it.
   5307 	 *
   5308 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5309 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5310 	 * both sets within the same 4G segment.
   5311 	 */
   5312 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5313 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5314 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5315 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5316 		aprint_error_dev(sc->sc_dev,
   5317 		    "unable to allocate RX control data, error = %d\n",
   5318 		    error);
   5319 		goto fail_0;
   5320 	}
   5321 
   5322 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5323 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5324 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5325 		aprint_error_dev(sc->sc_dev,
   5326 		    "unable to map RX control data, error = %d\n", error);
   5327 		goto fail_1;
   5328 	}
   5329 
   5330 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5331 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5332 		aprint_error_dev(sc->sc_dev,
   5333 		    "unable to create RX control data DMA map, error = %d\n",
   5334 		    error);
   5335 		goto fail_2;
   5336 	}
   5337 
   5338 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5339 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5340 		aprint_error_dev(sc->sc_dev,
   5341 		    "unable to load RX control data DMA map, error = %d\n",
   5342 		    error);
   5343 		goto fail_3;
   5344 	}
   5345 
   5346 	return 0;
   5347 
   5348  fail_3:
   5349 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5350  fail_2:
   5351 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5352 	    rxq->rxq_desc_size);
   5353  fail_1:
   5354 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5355  fail_0:
   5356 	return error;
   5357 }
   5358 
   5359 static void
   5360 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5361 {
   5362 
   5363 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5364 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5365 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5366 	    rxq->rxq_desc_size);
   5367 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5368 }
   5369 
   5370 
   5371 static int
   5372 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5373 {
   5374 	int i, error;
   5375 
   5376 	/* Create the transmit buffer DMA maps. */
   5377 	WM_TXQUEUELEN(txq) =
   5378 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5379 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5380 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5381 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5382 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5383 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5384 			aprint_error_dev(sc->sc_dev,
   5385 			    "unable to create Tx DMA map %d, error = %d\n",
   5386 			    i, error);
   5387 			goto fail;
   5388 		}
   5389 	}
   5390 
   5391 	return 0;
   5392 
   5393  fail:
   5394 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5395 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5396 			bus_dmamap_destroy(sc->sc_dmat,
   5397 			    txq->txq_soft[i].txs_dmamap);
   5398 	}
   5399 	return error;
   5400 }
   5401 
   5402 static void
   5403 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5404 {
   5405 	int i;
   5406 
   5407 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5408 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5409 			bus_dmamap_destroy(sc->sc_dmat,
   5410 			    txq->txq_soft[i].txs_dmamap);
   5411 	}
   5412 }
   5413 
   5414 static int
   5415 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5416 {
   5417 	int i, error;
   5418 
   5419 	/* Create the receive buffer DMA maps. */
   5420 	for (i = 0; i < WM_NRXDESC; i++) {
   5421 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5422 			    MCLBYTES, 0, 0,
   5423 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5424 			aprint_error_dev(sc->sc_dev,
   5425 			    "unable to create Rx DMA map %d error = %d\n",
   5426 			    i, error);
   5427 			goto fail;
   5428 		}
   5429 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5430 	}
   5431 
   5432 	return 0;
   5433 
   5434  fail:
   5435 	for (i = 0; i < WM_NRXDESC; i++) {
   5436 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5437 			bus_dmamap_destroy(sc->sc_dmat,
   5438 			    rxq->rxq_soft[i].rxs_dmamap);
   5439 	}
   5440 	return error;
   5441 }
   5442 
   5443 static void
   5444 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5445 {
   5446 	int i;
   5447 
   5448 	for (i = 0; i < WM_NRXDESC; i++) {
   5449 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5450 			bus_dmamap_destroy(sc->sc_dmat,
   5451 			    rxq->rxq_soft[i].rxs_dmamap);
   5452 	}
   5453 }
   5454 
   5455 /*
    5456  * wm_alloc_txrx_queues:
    5457  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   5458  */
   5459 static int
   5460 wm_alloc_txrx_queues(struct wm_softc *sc)
   5461 {
   5462 	int i, error, tx_done, rx_done;
   5463 
   5464 	/*
   5465 	 * For transmission
   5466 	 */
   5467 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
   5468 	    KM_SLEEP);
   5469 	if (sc->sc_txq == NULL) {
   5470 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_txqueue\n");
   5471 		error = ENOMEM;
   5472 		goto fail_0;
   5473 	}
   5474 
   5475 	error = 0;
   5476 	tx_done = 0;
   5477 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5478 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5479 		txq->txq_sc = sc;
   5480 #ifdef WM_MPSAFE
   5481 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5482 #else
   5483 		txq->txq_lock = NULL;
   5484 #endif
   5485 		error = wm_alloc_tx_descs(sc, txq);
   5486 		if (error)
   5487 			break;
   5488 		error = wm_alloc_tx_buffer(sc, txq);
   5489 		if (error) {
   5490 			wm_free_tx_descs(sc, txq);
   5491 			break;
   5492 		}
   5493 		tx_done++;
   5494 	}
   5495 	if (error)
   5496 		goto fail_1;
   5497 
   5498 	/*
    5499 	 * For receive
   5500 	 */
   5501 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
   5502 	    KM_SLEEP);
   5503 	if (sc->sc_rxq == NULL) {
   5504 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_rxqueue\n");
   5505 		error = ENOMEM;
   5506 		goto fail_1;
   5507 	}
   5508 
   5509 	error = 0;
   5510 	rx_done = 0;
   5511 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5512 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5513 		rxq->rxq_sc = sc;
   5514 #ifdef WM_MPSAFE
   5515 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5516 #else
   5517 		rxq->rxq_lock = NULL;
   5518 #endif
   5519 		error = wm_alloc_rx_descs(sc, rxq);
   5520 		if (error)
   5521 			break;
   5522 
   5523 		error = wm_alloc_rx_buffer(sc, rxq);
   5524 		if (error) {
   5525 			wm_free_rx_descs(sc, rxq);
   5526 			break;
   5527 		}
   5528 
   5529 		rx_done++;
   5530 	}
   5531 	if (error)
   5532 		goto fail_2;
   5533 
   5534 	return 0;
   5535 
   5536  fail_2:
   5537 	for (i = 0; i < rx_done; i++) {
   5538 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5539 		wm_free_rx_buffer(sc, rxq);
   5540 		wm_free_rx_descs(sc, rxq);
   5541 		if (rxq->rxq_lock)
   5542 			mutex_obj_free(rxq->rxq_lock);
   5543 	}
   5544 	kmem_free(sc->sc_rxq,
   5545 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5546  fail_1:
   5547 	for (i = 0; i < tx_done; i++) {
   5548 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5549 		wm_free_tx_buffer(sc, txq);
   5550 		wm_free_tx_descs(sc, txq);
   5551 		if (txq->txq_lock)
   5552 			mutex_obj_free(txq->txq_lock);
   5553 	}
   5554 	kmem_free(sc->sc_txq,
   5555 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5556  fail_0:
   5557 	return error;
   5558 }
   5559 
   5560 /*
    5561  * wm_free_txrx_queues:
    5562  *	Free {tx,rx} descriptors and {tx,rx} buffers
   5563  */
   5564 static void
   5565 wm_free_txrx_queues(struct wm_softc *sc)
   5566 {
   5567 	int i;
   5568 
   5569 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5570 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5571 		wm_free_rx_buffer(sc, rxq);
   5572 		wm_free_rx_descs(sc, rxq);
   5573 		if (rxq->rxq_lock)
   5574 			mutex_obj_free(rxq->rxq_lock);
   5575 	}
   5576 	kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5577 
   5578 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5579 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5580 		wm_free_tx_buffer(sc, txq);
   5581 		wm_free_tx_descs(sc, txq);
   5582 		if (txq->txq_lock)
   5583 			mutex_obj_free(txq->txq_lock);
   5584 	}
   5585 	kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5586 }
   5587 
   5588 static void
   5589 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5590 {
   5591 
   5592 	KASSERT(WM_TX_LOCKED(txq));
   5593 
   5594 	/* Initialize the transmit descriptor ring. */
   5595 	memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
   5596 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5597 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5598 	txq->txq_free = WM_NTXDESC(txq);
   5599 	txq->txq_next = 0;
   5600 }
   5601 
   5602 static void
   5603 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
   5604 {
   5605 
   5606 	KASSERT(WM_TX_LOCKED(txq));
   5607 
   5608 	if (sc->sc_type < WM_T_82543) {
   5609 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5610 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5611 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
   5612 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5613 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5614 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5615 	} else {
   5616 		int qid = txq->txq_id;
   5617 
   5618 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5619 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5620 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
   5621 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5622 
   5623 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5624 			/*
   5625 			 * Don't write TDT before TCTL.EN is set.
    5626 			 * See the documentation.
   5627 			 */
   5628 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5629 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5630 			    | TXDCTL_WTHRESH(0));
   5631 		else {
    5632 			/* ITR is in 256ns units; TIDV is in 1.024us units. */
    5633 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
    5634 			if (sc->sc_type >= WM_T_82540) {
    5635 				/* Should be the same value as TIDV. */
    5636 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5637 			}
   5638 
   5639 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5640 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5641 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5642 		}
   5643 	}
   5644 }
   5645 
   5646 static void
   5647 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5648 {
   5649 	int i;
   5650 
   5651 	KASSERT(WM_TX_LOCKED(txq));
   5652 
   5653 	/* Initialize the transmit job descriptors. */
   5654 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5655 		txq->txq_soft[i].txs_mbuf = NULL;
   5656 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5657 	txq->txq_snext = 0;
   5658 	txq->txq_sdirty = 0;
   5659 }
   5660 
   5661 static void
   5662 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
   5663 {
   5664 
   5665 	KASSERT(WM_TX_LOCKED(txq));
   5666 
   5667 	/*
   5668 	 * Set up some register offsets that are different between
   5669 	 * the i82542 and the i82543 and later chips.
   5670 	 */
   5671 	if (sc->sc_type < WM_T_82543)
   5672 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5673 	else
   5674 		txq->txq_tdt_reg = WMREG_TDT(txq->txq_id);
   5675 
   5676 	wm_init_tx_descs(sc, txq);
   5677 	wm_init_tx_regs(sc, txq);
   5678 	wm_init_tx_buffer(sc, txq);
   5679 }
   5680 
   5681 static void
   5682 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5683 {
   5684 
   5685 	KASSERT(WM_RX_LOCKED(rxq));
   5686 
   5687 	/*
   5688 	 * Initialize the receive descriptor and receive job
   5689 	 * descriptor rings.
   5690 	 */
   5691 	if (sc->sc_type < WM_T_82543) {
   5692 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5693 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5694 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5695 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5696 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5697 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5698 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5699 
   5700 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5701 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5702 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5703 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5704 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5705 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5706 	} else {
   5707 		int qid = rxq->rxq_id;
   5708 
   5709 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5710 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5711 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5712 
   5713 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5714 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5715 				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or higher\n", __func__, MCLBYTES);
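			/*
			 * SRRCTL's packet buffer size field is in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
			 * divisibility check above.
			 */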
   5716 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5717 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5718 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5719 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5720 			    | RXDCTL_WTHRESH(1));
   5721 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5722 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5723 		} else {
   5724 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5725 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
    5726 			/* ITR is in 256ns units; RDTR is in 1.024us units. */
    5727 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    5728 			/* MUST be the same value as RDTR. */
    5729 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5730 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5731 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5732 		}
   5733 	}
   5734 }
   5735 
   5736 static int
   5737 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5738 {
   5739 	struct wm_rxsoft *rxs;
   5740 	int error, i;
   5741 
   5742 	KASSERT(WM_RX_LOCKED(rxq));
   5743 
   5744 	for (i = 0; i < WM_NRXDESC; i++) {
   5745 		rxs = &rxq->rxq_soft[i];
   5746 		if (rxs->rxs_mbuf == NULL) {
   5747 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5748 				log(LOG_ERR, "%s: unable to allocate or map "
   5749 				    "rx buffer %d, error = %d\n",
   5750 				    device_xname(sc->sc_dev), i, error);
   5751 				/*
   5752 				 * XXX Should attempt to run with fewer receive
   5753 				 * XXX buffers instead of just failing.
   5754 				 */
   5755 				wm_rxdrain(rxq);
   5756 				return ENOMEM;
   5757 			}
   5758 		} else {
   5759 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5760 				wm_init_rxdesc(rxq, i);
   5761 			/*
    5762 			 * For 82575 and newer devices, the RX descriptors
   5763 			 * must be initialized after the setting of RCTL.EN in
   5764 			 * wm_set_filter()
   5765 			 */
   5766 		}
   5767 	}
   5768 	rxq->rxq_ptr = 0;
   5769 	rxq->rxq_discard = 0;
   5770 	WM_RXCHAIN_RESET(rxq);
   5771 
   5772 	return 0;
   5773 }
   5774 
   5775 static int
   5776 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5777 {
   5778 
   5779 	KASSERT(WM_RX_LOCKED(rxq));
   5780 
   5781 	/*
   5782 	 * Set up some register offsets that are different between
   5783 	 * the i82542 and the i82543 and later chips.
   5784 	 */
   5785 	if (sc->sc_type < WM_T_82543)
   5786 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5787 	else
   5788 		rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
   5789 
   5790 	wm_init_rx_regs(sc, rxq);
   5791 	return wm_init_rx_buffer(sc, rxq);
   5792 }
   5793 
   5794 /*
    5795  * wm_init_txrx_queues:
    5796  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   5797  */
   5798 static int
   5799 wm_init_txrx_queues(struct wm_softc *sc)
   5800 {
   5801 	int i, error;
   5802 
   5803 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5804 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5805 		WM_TX_LOCK(txq);
   5806 		wm_init_tx_queue(sc, txq);
   5807 		WM_TX_UNLOCK(txq);
   5808 	}
   5809 
   5810 	error = 0;
   5811 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5812 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5813 		WM_RX_LOCK(rxq);
   5814 		error = wm_init_rx_queue(sc, rxq);
   5815 		WM_RX_UNLOCK(rxq);
   5816 		if (error)
   5817 			break;
   5818 	}
   5819 
   5820 	return error;
   5821 }
   5822 
   5823 /*
   5824  * wm_tx_offload:
   5825  *
   5826  *	Set up TCP/IP checksumming parameters for the
   5827  *	specified packet.
   5828  */
   5829 static int
   5830 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5831     uint8_t *fieldsp)
   5832 {
   5833 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5834 	struct mbuf *m0 = txs->txs_mbuf;
   5835 	struct livengood_tcpip_ctxdesc *t;
   5836 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5837 	uint32_t ipcse;
   5838 	struct ether_header *eh;
   5839 	int offset, iphl;
   5840 	uint8_t fields;
   5841 
   5842 	/*
   5843 	 * XXX It would be nice if the mbuf pkthdr had offset
   5844 	 * fields for the protocol headers.
   5845 	 */
   5846 
   5847 	eh = mtod(m0, struct ether_header *);
   5848 	switch (htons(eh->ether_type)) {
   5849 	case ETHERTYPE_IP:
   5850 	case ETHERTYPE_IPV6:
   5851 		offset = ETHER_HDR_LEN;
   5852 		break;
   5853 
   5854 	case ETHERTYPE_VLAN:
   5855 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5856 		break;
   5857 
   5858 	default:
   5859 		/*
   5860 		 * Don't support this protocol or encapsulation.
   5861 		 */
   5862 		*fieldsp = 0;
   5863 		*cmdp = 0;
   5864 		return 0;
   5865 	}
   5866 
   5867 	if ((m0->m_pkthdr.csum_flags &
   5868 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   5869 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5870 	} else {
   5871 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5872 	}
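	/* IPCSE is the inclusive offset of the last byte of the IP header. */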
   5873 	ipcse = offset + iphl - 1;
   5874 
   5875 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   5876 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   5877 	seg = 0;
   5878 	fields = 0;
   5879 
   5880 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5881 		int hlen = offset + iphl;
   5882 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5883 
   5884 		if (__predict_false(m0->m_len <
   5885 				    (hlen + sizeof(struct tcphdr)))) {
   5886 			/*
   5887 			 * TCP/IP headers are not in the first mbuf; we need
   5888 			 * to do this the slow and painful way.  Let's just
   5889 			 * hope this doesn't happen very often.
   5890 			 */
   5891 			struct tcphdr th;
   5892 
   5893 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   5894 
   5895 			m_copydata(m0, hlen, sizeof(th), &th);
   5896 			if (v4) {
   5897 				struct ip ip;
   5898 
   5899 				m_copydata(m0, offset, sizeof(ip), &ip);
   5900 				ip.ip_len = 0;
   5901 				m_copyback(m0,
   5902 				    offset + offsetof(struct ip, ip_len),
   5903 				    sizeof(ip.ip_len), &ip.ip_len);
   5904 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5905 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5906 			} else {
   5907 				struct ip6_hdr ip6;
   5908 
   5909 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   5910 				ip6.ip6_plen = 0;
   5911 				m_copyback(m0,
   5912 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   5913 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   5914 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   5915 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   5916 			}
   5917 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   5918 			    sizeof(th.th_sum), &th.th_sum);
   5919 
   5920 			hlen += th.th_off << 2;
   5921 		} else {
   5922 			/*
   5923 			 * TCP/IP headers are in the first mbuf; we can do
   5924 			 * this the easy way.
   5925 			 */
   5926 			struct tcphdr *th;
   5927 
   5928 			if (v4) {
   5929 				struct ip *ip =
   5930 				    (void *)(mtod(m0, char *) + offset);
   5931 				th = (void *)(mtod(m0, char *) + hlen);
   5932 
   5933 				ip->ip_len = 0;
   5934 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   5935 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   5936 			} else {
   5937 				struct ip6_hdr *ip6 =
   5938 				    (void *)(mtod(m0, char *) + offset);
   5939 				th = (void *)(mtod(m0, char *) + hlen);
   5940 
   5941 				ip6->ip6_plen = 0;
   5942 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   5943 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   5944 			}
   5945 			hlen += th->th_off << 2;
   5946 		}
   5947 
   5948 		if (v4) {
   5949 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   5950 			cmdlen |= WTX_TCPIP_CMD_IP;
   5951 		} else {
   5952 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   5953 			ipcse = 0;
   5954 		}
   5955 		cmd |= WTX_TCPIP_CMD_TSE;
   5956 		cmdlen |= WTX_TCPIP_CMD_TSE |
   5957 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   5958 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   5959 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   5960 	}
   5961 
   5962 	/*
   5963 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   5964 	 * offload feature, if we load the context descriptor, we
   5965 	 * MUST provide valid values for IPCSS and TUCSS fields.
   5966 	 */
   5967 
   5968 	ipcs = WTX_TCPIP_IPCSS(offset) |
   5969 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   5970 	    WTX_TCPIP_IPCSE(ipcse);
   5971 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   5972 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   5973 		fields |= WTX_IXSM;
   5974 	}
   5975 
   5976 	offset += iphl;
   5977 
   5978 	if (m0->m_pkthdr.csum_flags &
   5979 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   5980 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   5981 		fields |= WTX_TXSM;
   5982 		tucs = WTX_TCPIP_TUCSS(offset) |
   5983 		    WTX_TCPIP_TUCSO(offset +
   5984 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   5985 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5986 	} else if ((m0->m_pkthdr.csum_flags &
   5987 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   5988 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   5989 		fields |= WTX_TXSM;
   5990 		tucs = WTX_TCPIP_TUCSS(offset) |
   5991 		    WTX_TCPIP_TUCSO(offset +
   5992 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   5993 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5994 	} else {
   5995 		/* Just initialize it to a valid TCP context. */
   5996 		tucs = WTX_TCPIP_TUCSS(offset) |
   5997 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   5998 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5999 	}
   6000 
   6001 	/* Fill in the context descriptor. */
   6002 	t = (struct livengood_tcpip_ctxdesc *)
   6003 	    &txq->txq_descs[txq->txq_next];
   6004 	t->tcpip_ipcs = htole32(ipcs);
   6005 	t->tcpip_tucs = htole32(tucs);
   6006 	t->tcpip_cmdlen = htole32(cmdlen);
   6007 	t->tcpip_seg = htole32(seg);
   6008 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6009 
   6010 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6011 	txs->txs_ndesc++;
   6012 
   6013 	*cmdp = cmd;
   6014 	*fieldsp = fields;
   6015 
   6016 	return 0;
   6017 }
   6018 
   6019 /*
   6020  * wm_start:		[ifnet interface function]
   6021  *
   6022  *	Start packet transmission on the interface.
   6023  */
   6024 static void
   6025 wm_start(struct ifnet *ifp)
   6026 {
   6027 	struct wm_softc *sc = ifp->if_softc;
   6028 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6029 
   6030 	WM_TX_LOCK(txq);
   6031 	if (!sc->sc_stopping)
   6032 		wm_start_locked(ifp);
   6033 	WM_TX_UNLOCK(txq);
   6034 }
   6035 
   6036 static void
   6037 wm_start_locked(struct ifnet *ifp)
   6038 {
   6039 	struct wm_softc *sc = ifp->if_softc;
   6040 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6041 	struct mbuf *m0;
   6042 	struct m_tag *mtag;
   6043 	struct wm_txsoft *txs;
   6044 	bus_dmamap_t dmamap;
   6045 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6046 	bus_addr_t curaddr;
   6047 	bus_size_t seglen, curlen;
   6048 	uint32_t cksumcmd;
   6049 	uint8_t cksumfields;
   6050 
   6051 	KASSERT(WM_TX_LOCKED(txq));
   6052 
   6053 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6054 		return;
   6055 
   6056 	/* Remember the previous number of free descriptors. */
   6057 	ofree = txq->txq_free;
   6058 
   6059 	/*
   6060 	 * Loop through the send queue, setting up transmit descriptors
   6061 	 * until we drain the queue, or use up all available transmit
   6062 	 * descriptors.
   6063 	 */
   6064 	for (;;) {
   6065 		m0 = NULL;
   6066 
   6067 		/* Get a work queue entry. */
   6068 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6069 			wm_txeof(sc);
   6070 			if (txq->txq_sfree == 0) {
   6071 				DPRINTF(WM_DEBUG_TX,
   6072 				    ("%s: TX: no free job descriptors\n",
   6073 					device_xname(sc->sc_dev)));
   6074 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6075 				break;
   6076 			}
   6077 		}
   6078 
   6079 		/* Grab a packet off the queue. */
   6080 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6081 		if (m0 == NULL)
   6082 			break;
   6083 
   6084 		DPRINTF(WM_DEBUG_TX,
   6085 		    ("%s: TX: have packet to transmit: %p\n",
   6086 		    device_xname(sc->sc_dev), m0));
   6087 
   6088 		txs = &txq->txq_soft[txq->txq_snext];
   6089 		dmamap = txs->txs_dmamap;
   6090 
   6091 		use_tso = (m0->m_pkthdr.csum_flags &
   6092 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6093 
   6094 		/*
   6095 		 * So says the Linux driver:
   6096 		 * The controller does a simple calculation to make sure
   6097 		 * there is enough room in the FIFO before initiating the
   6098 		 * DMA for each buffer.  The calc is:
   6099 		 *	4 = ceil(buffer len / MSS)
   6100 		 * To make sure we don't overrun the FIFO, adjust the max
   6101 		 * buffer len if the MSS drops.
   6102 		 */
   6103 		dmamap->dm_maxsegsz =
   6104 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6105 		    ? m0->m_pkthdr.segsz << 2
   6106 		    : WTX_MAX_LEN;
   6107 
   6108 		/*
   6109 		 * Load the DMA map.  If this fails, the packet either
   6110 		 * didn't fit in the allotted number of segments, or we
   6111 		 * were short on resources.  For the too-many-segments
   6112 		 * case, we simply report an error and drop the packet,
   6113 		 * since we can't sanely copy a jumbo packet to a single
   6114 		 * buffer.
   6115 		 */
   6116 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6117 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6118 		if (error) {
   6119 			if (error == EFBIG) {
   6120 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6121 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6122 				    "DMA segments, dropping...\n",
   6123 				    device_xname(sc->sc_dev));
   6124 				wm_dump_mbuf_chain(sc, m0);
   6125 				m_freem(m0);
   6126 				continue;
   6127 			}
   6128 			/*  Short on resources, just stop for now. */
   6129 			DPRINTF(WM_DEBUG_TX,
   6130 			    ("%s: TX: dmamap load failed: %d\n",
   6131 			    device_xname(sc->sc_dev), error));
   6132 			break;
   6133 		}
   6134 
   6135 		segs_needed = dmamap->dm_nsegs;
   6136 		if (use_tso) {
   6137 			/* For sentinel descriptor; see below. */
   6138 			segs_needed++;
   6139 		}
   6140 
   6141 		/*
   6142 		 * Ensure we have enough descriptors free to describe
   6143 		 * the packet.  Note, we always reserve one descriptor
   6144 		 * at the end of the ring due to the semantics of the
   6145 		 * TDT register, plus one more in the event we need
   6146 		 * to load offload context.
   6147 		 */
   6148 		if (segs_needed > txq->txq_free - 2) {
   6149 			/*
   6150 			 * Not enough free descriptors to transmit this
   6151 			 * packet.  We haven't committed anything yet,
   6152 			 * so just unload the DMA map, put the packet
    6153 			 * back on the queue, and punt.  Notify the upper
   6154 			 * layer that there are no more slots left.
   6155 			 */
   6156 			DPRINTF(WM_DEBUG_TX,
   6157 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6158 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6159 			    segs_needed, txq->txq_free - 1));
   6160 			ifp->if_flags |= IFF_OACTIVE;
   6161 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6162 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6163 			break;
   6164 		}
   6165 
   6166 		/*
   6167 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6168 		 * once we know we can transmit the packet, since we
   6169 		 * do some internal FIFO space accounting here.
   6170 		 */
   6171 		if (sc->sc_type == WM_T_82547 &&
   6172 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6173 			DPRINTF(WM_DEBUG_TX,
   6174 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6175 			    device_xname(sc->sc_dev)));
   6176 			ifp->if_flags |= IFF_OACTIVE;
   6177 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6178 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   6179 			break;
   6180 		}
   6181 
   6182 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6183 
   6184 		DPRINTF(WM_DEBUG_TX,
   6185 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6186 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6187 
   6188 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6189 
   6190 		/*
   6191 		 * Store a pointer to the packet so that we can free it
   6192 		 * later.
   6193 		 *
   6194 		 * Initially, we consider the number of descriptors the
    6195 		 * packet uses to be the number of DMA segments.  This may be
   6196 		 * incremented by 1 if we do checksum offload (a descriptor
   6197 		 * is used to set the checksum context).
   6198 		 */
   6199 		txs->txs_mbuf = m0;
   6200 		txs->txs_firstdesc = txq->txq_next;
   6201 		txs->txs_ndesc = segs_needed;
   6202 
   6203 		/* Set up offload parameters for this packet. */
   6204 		if (m0->m_pkthdr.csum_flags &
   6205 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6206 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6207 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6208 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6209 					  &cksumfields) != 0) {
   6210 				/* Error message already displayed. */
   6211 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6212 				continue;
   6213 			}
   6214 		} else {
   6215 			cksumcmd = 0;
   6216 			cksumfields = 0;
   6217 		}
   6218 
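		/* Delay the interrupt (IDE) and insert the FCS (IFCS). */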
   6219 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6220 
   6221 		/* Sync the DMA map. */
   6222 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6223 		    BUS_DMASYNC_PREWRITE);
   6224 
   6225 		/* Initialize the transmit descriptor. */
   6226 		for (nexttx = txq->txq_next, seg = 0;
   6227 		     seg < dmamap->dm_nsegs; seg++) {
   6228 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6229 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6230 			     seglen != 0;
   6231 			     curaddr += curlen, seglen -= curlen,
   6232 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6233 				curlen = seglen;
   6234 
   6235 				/*
   6236 				 * So says the Linux driver:
   6237 				 * Work around for premature descriptor
   6238 				 * write-backs in TSO mode.  Append a
   6239 				 * 4-byte sentinel descriptor.
   6240 				 */
   6241 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6242 				    curlen > 8)
   6243 					curlen -= 4;
   6244 
   6245 				wm_set_dma_addr(
   6246 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6247 				txq->txq_descs[nexttx].wtx_cmdlen
   6248 				    = htole32(cksumcmd | curlen);
   6249 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6250 				    = 0;
   6251 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6252 				    = cksumfields;
    6253 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6254 				lasttx = nexttx;
   6255 
   6256 				DPRINTF(WM_DEBUG_TX,
   6257 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6258 				     "len %#04zx\n",
   6259 				    device_xname(sc->sc_dev), nexttx,
   6260 				    (uint64_t)curaddr, curlen));
   6261 			}
   6262 		}
   6263 
   6264 		KASSERT(lasttx != -1);
   6265 
   6266 		/*
   6267 		 * Set up the command byte on the last descriptor of
   6268 		 * the packet.  If we're in the interrupt delay window,
   6269 		 * delay the interrupt.
   6270 		 */
   6271 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6272 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6273 
   6274 		/*
   6275 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6276 		 * up the descriptor to encapsulate the packet for us.
   6277 		 *
   6278 		 * This is only valid on the last descriptor of the packet.
   6279 		 */
   6280 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6281 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6282 			    htole32(WTX_CMD_VLE);
   6283 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6284 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6285 		}
   6286 
   6287 		txs->txs_lastdesc = lasttx;
   6288 
   6289 		DPRINTF(WM_DEBUG_TX,
   6290 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6291 		    device_xname(sc->sc_dev),
   6292 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6293 
   6294 		/* Sync the descriptors we're using. */
   6295 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6296 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6297 
   6298 		/* Give the packet to the chip. */
   6299 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6300 
   6301 		DPRINTF(WM_DEBUG_TX,
   6302 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6303 
   6304 		DPRINTF(WM_DEBUG_TX,
   6305 		    ("%s: TX: finished transmitting packet, job %d\n",
   6306 		    device_xname(sc->sc_dev), txq->txq_snext));
   6307 
   6308 		/* Advance the tx pointer. */
   6309 		txq->txq_free -= txs->txs_ndesc;
   6310 		txq->txq_next = nexttx;
   6311 
   6312 		txq->txq_sfree--;
   6313 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6314 
   6315 		/* Pass the packet to any BPF listeners. */
   6316 		bpf_mtap(ifp, m0);
   6317 	}
   6318 
   6319 	if (m0 != NULL) {
   6320 		ifp->if_flags |= IFF_OACTIVE;
   6321 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6322 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6323 			__func__));
   6324 		m_freem(m0);
   6325 	}
   6326 
   6327 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6328 		/* No more slots; notify upper layer. */
   6329 		ifp->if_flags |= IFF_OACTIVE;
   6330 	}
   6331 
   6332 	if (txq->txq_free != ofree) {
   6333 		/* Set a watchdog timer in case the chip flakes out. */
   6334 		ifp->if_timer = 5;
   6335 	}
   6336 }
   6337 
   6338 /*
   6339  * wm_nq_tx_offload:
   6340  *
   6341  *	Set up TCP/IP checksumming parameters for the
   6342  *	specified packet, for NEWQUEUE devices
   6343  */
   6344 static int
   6345 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   6346     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6347 {
   6348 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6349 	struct mbuf *m0 = txs->txs_mbuf;
   6350 	struct m_tag *mtag;
   6351 	uint32_t vl_len, mssidx, cmdc;
   6352 	struct ether_header *eh;
   6353 	int offset, iphl;
   6354 
   6355 	/*
   6356 	 * XXX It would be nice if the mbuf pkthdr had offset
   6357 	 * fields for the protocol headers.
   6358 	 */
   6359 	*cmdlenp = 0;
   6360 	*fieldsp = 0;
   6361 
   6362 	eh = mtod(m0, struct ether_header *);
   6363 	switch (htons(eh->ether_type)) {
   6364 	case ETHERTYPE_IP:
   6365 	case ETHERTYPE_IPV6:
   6366 		offset = ETHER_HDR_LEN;
   6367 		break;
   6368 
   6369 	case ETHERTYPE_VLAN:
   6370 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6371 		break;
   6372 
   6373 	default:
   6374 		/* Don't support this protocol or encapsulation. */
   6375 		*do_csum = false;
   6376 		return 0;
   6377 	}
   6378 	*do_csum = true;
   6379 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6380 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6381 
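	/*
	 * The advanced context descriptor packs the L2 header length
	 * (MACLEN), the IP header length (IPLEN) and the 802.1q VLAN
	 * tag into the single 32-bit vl_len word built up below; each
	 * KASSERT checks that the value fits its field.  Roughly
	 * (shift names abbreviated):
	 *
	 *	vl_len = (maclen << MACLEN_SHIFT)
	 *	    | (iphl << IPLEN_SHIFT) | (vtag << VLAN_SHIFT);
	 */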
   6382 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6383 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6384 
   6385 	if ((m0->m_pkthdr.csum_flags &
   6386 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6387 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6388 	} else {
   6389 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6390 	}
   6391 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6392 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6393 
   6394 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6395 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6396 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6397 		*cmdlenp |= NQTX_CMD_VLE;
   6398 	}
   6399 
   6400 	mssidx = 0;
   6401 
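	/*
	 * For TSO the hardware replicates and rewrites the IP and TCP
	 * headers on every segment it emits, so the headers must be
	 * pre-conditioned here: zero the IP total length (or IPv6
	 * payload length) and seed the TCP checksum with just the
	 * pseudo-header sum, as done below for both the contiguous
	 * and the split-mbuf cases.
	 */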
   6402 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6403 		int hlen = offset + iphl;
   6404 		int tcp_hlen;
   6405 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6406 
   6407 		if (__predict_false(m0->m_len <
   6408 				    (hlen + sizeof(struct tcphdr)))) {
   6409 			/*
   6410 			 * TCP/IP headers are not in the first mbuf; we need
   6411 			 * to do this the slow and painful way.  Let's just
   6412 			 * hope this doesn't happen very often.
   6413 			 */
   6414 			struct tcphdr th;
   6415 
   6416 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6417 
   6418 			m_copydata(m0, hlen, sizeof(th), &th);
   6419 			if (v4) {
   6420 				struct ip ip;
   6421 
   6422 				m_copydata(m0, offset, sizeof(ip), &ip);
   6423 				ip.ip_len = 0;
   6424 				m_copyback(m0,
   6425 				    offset + offsetof(struct ip, ip_len),
   6426 				    sizeof(ip.ip_len), &ip.ip_len);
   6427 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6428 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6429 			} else {
   6430 				struct ip6_hdr ip6;
   6431 
   6432 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6433 				ip6.ip6_plen = 0;
   6434 				m_copyback(m0,
   6435 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6436 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6437 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6438 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6439 			}
   6440 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6441 			    sizeof(th.th_sum), &th.th_sum);
   6442 
   6443 			tcp_hlen = th.th_off << 2;
   6444 		} else {
   6445 			/*
   6446 			 * TCP/IP headers are in the first mbuf; we can do
   6447 			 * this the easy way.
   6448 			 */
   6449 			struct tcphdr *th;
   6450 
   6451 			if (v4) {
   6452 				struct ip *ip =
   6453 				    (void *)(mtod(m0, char *) + offset);
   6454 				th = (void *)(mtod(m0, char *) + hlen);
   6455 
   6456 				ip->ip_len = 0;
   6457 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6458 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6459 			} else {
   6460 				struct ip6_hdr *ip6 =
   6461 				    (void *)(mtod(m0, char *) + offset);
   6462 				th = (void *)(mtod(m0, char *) + hlen);
   6463 
   6464 				ip6->ip6_plen = 0;
   6465 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6466 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6467 			}
   6468 			tcp_hlen = th->th_off << 2;
   6469 		}
   6470 		hlen += tcp_hlen;
   6471 		*cmdlenp |= NQTX_CMD_TSE;
   6472 
   6473 		if (v4) {
   6474 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6475 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6476 		} else {
   6477 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6478 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6479 		}
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
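		/*
		 * Pack the MSS and the TCP header length into mssidx;
		 * the hardware sizes each emitted segment from these
		 * (segsz bytes of payload behind a tcp_hlen-byte
		 * header).
		 */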
   6482 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6483 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6484 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6485 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6486 	} else {
   6487 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6488 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6489 	}
   6490 
   6491 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6492 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6493 		cmdc |= NQTXC_CMD_IP4;
   6494 	}
   6495 
   6496 	if (m0->m_pkthdr.csum_flags &
   6497 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6498 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6499 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6500 			cmdc |= NQTXC_CMD_TCP;
   6501 		} else {
   6502 			cmdc |= NQTXC_CMD_UDP;
   6503 		}
   6504 		cmdc |= NQTXC_CMD_IP4;
   6505 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6506 	}
   6507 	if (m0->m_pkthdr.csum_flags &
   6508 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6509 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6510 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6511 			cmdc |= NQTXC_CMD_TCP;
   6512 		} else {
   6513 			cmdc |= NQTXC_CMD_UDP;
   6514 		}
   6515 		cmdc |= NQTXC_CMD_IP6;
   6516 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6517 	}
   6518 
   6519 	/* Fill in the context descriptor. */
   6520 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6521 	    htole32(vl_len);
   6522 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6523 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6524 	    htole32(cmdc);
   6525 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6526 	    htole32(mssidx);
   6527 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6528 	DPRINTF(WM_DEBUG_TX,
   6529 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6530 	    txq->txq_next, 0, vl_len));
   6531 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6532 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6533 	txs->txs_ndesc++;
   6534 	return 0;
   6535 }
   6536 
   6537 /*
   6538  * wm_nq_start:		[ifnet interface function]
   6539  *
   6540  *	Start packet transmission on the interface for NEWQUEUE devices
   6541  */
   6542 static void
   6543 wm_nq_start(struct ifnet *ifp)
   6544 {
   6545 	struct wm_softc *sc = ifp->if_softc;
   6546 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6547 
   6548 	WM_TX_LOCK(txq);
   6549 	if (!sc->sc_stopping)
   6550 		wm_nq_start_locked(ifp);
   6551 	WM_TX_UNLOCK(txq);
   6552 }
   6553 
   6554 static void
   6555 wm_nq_start_locked(struct ifnet *ifp)
   6556 {
   6557 	struct wm_softc *sc = ifp->if_softc;
   6558 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6559 	struct mbuf *m0;
   6560 	struct m_tag *mtag;
   6561 	struct wm_txsoft *txs;
   6562 	bus_dmamap_t dmamap;
   6563 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6564 	bool do_csum, sent;
   6565 
   6566 	KASSERT(WM_TX_LOCKED(txq));
   6567 
   6568 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6569 		return;
   6570 
   6571 	sent = false;
   6572 
   6573 	/*
   6574 	 * Loop through the send queue, setting up transmit descriptors
   6575 	 * until we drain the queue, or use up all available transmit
   6576 	 * descriptors.
   6577 	 */
   6578 	for (;;) {
   6579 		m0 = NULL;
   6580 
   6581 		/* Get a work queue entry. */
   6582 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6583 			wm_txeof(sc);
   6584 			if (txq->txq_sfree == 0) {
   6585 				DPRINTF(WM_DEBUG_TX,
   6586 				    ("%s: TX: no free job descriptors\n",
   6587 					device_xname(sc->sc_dev)));
   6588 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6589 				break;
   6590 			}
   6591 		}
   6592 
   6593 		/* Grab a packet off the queue. */
   6594 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6595 		if (m0 == NULL)
   6596 			break;
   6597 
   6598 		DPRINTF(WM_DEBUG_TX,
   6599 		    ("%s: TX: have packet to transmit: %p\n",
   6600 		    device_xname(sc->sc_dev), m0));
   6601 
   6602 		txs = &txq->txq_soft[txq->txq_snext];
   6603 		dmamap = txs->txs_dmamap;
   6604 
   6605 		/*
   6606 		 * Load the DMA map.  If this fails, the packet either
   6607 		 * didn't fit in the allotted number of segments, or we
   6608 		 * were short on resources.  For the too-many-segments
   6609 		 * case, we simply report an error and drop the packet,
   6610 		 * since we can't sanely copy a jumbo packet to a single
   6611 		 * buffer.
   6612 		 */
   6613 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6614 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6615 		if (error) {
   6616 			if (error == EFBIG) {
   6617 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6618 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6619 				    "DMA segments, dropping...\n",
   6620 				    device_xname(sc->sc_dev));
   6621 				wm_dump_mbuf_chain(sc, m0);
   6622 				m_freem(m0);
   6623 				continue;
   6624 			}
   6625 			/* Short on resources, just stop for now. */
   6626 			DPRINTF(WM_DEBUG_TX,
   6627 			    ("%s: TX: dmamap load failed: %d\n",
   6628 			    device_xname(sc->sc_dev), error));
   6629 			break;
   6630 		}
   6631 
   6632 		segs_needed = dmamap->dm_nsegs;
   6633 
   6634 		/*
   6635 		 * Ensure we have enough descriptors free to describe
   6636 		 * the packet.  Note, we always reserve one descriptor
   6637 		 * at the end of the ring due to the semantics of the
   6638 		 * TDT register, plus one more in the event we need
   6639 		 * to load offload context.
   6640 		 */
   6641 		if (segs_needed > txq->txq_free - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map and punt; the
			 * packet is dropped below.  Notify the upper
			 * layer that there are no more slots left.
			 */
   6649 			DPRINTF(WM_DEBUG_TX,
   6650 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6651 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6652 			    segs_needed, txq->txq_free - 1));
   6653 			ifp->if_flags |= IFF_OACTIVE;
   6654 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6655 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6656 			break;
   6657 		}
   6658 
   6659 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6660 
   6661 		DPRINTF(WM_DEBUG_TX,
   6662 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6663 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6664 
   6665 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6666 
   6667 		/*
   6668 		 * Store a pointer to the packet so that we can free it
   6669 		 * later.
   6670 		 *
   6671 		 * Initially, we consider the number of descriptors the
   6672 		 * packet uses the number of DMA segments.  This may be
   6673 		 * incremented by 1 if we do checksum offload (a descriptor
   6674 		 * is used to set the checksum context).
   6675 		 */
   6676 		txs->txs_mbuf = m0;
   6677 		txs->txs_firstdesc = txq->txq_next;
   6678 		txs->txs_ndesc = segs_needed;
   6679 
   6680 		/* Set up offload parameters for this packet. */
   6681 		uint32_t cmdlen, fields, dcmdlen;
   6682 		if (m0->m_pkthdr.csum_flags &
   6683 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6684 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6685 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6686 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   6687 			    &do_csum) != 0) {
   6688 				/* Error message already displayed. */
   6689 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6690 				continue;
   6691 			}
   6692 		} else {
   6693 			do_csum = false;
   6694 			cmdlen = 0;
   6695 			fields = 0;
   6696 		}
   6697 
   6698 		/* Sync the DMA map. */
   6699 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6700 		    BUS_DMASYNC_PREWRITE);
   6701 
   6702 		/* Initialize the first transmit descriptor. */
   6703 		nexttx = txq->txq_next;
   6704 		if (!do_csum) {
   6705 			/* setup a legacy descriptor */
   6706 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   6707 			    dmamap->dm_segs[0].ds_addr);
   6708 			txq->txq_descs[nexttx].wtx_cmdlen =
   6709 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6710 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6711 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6712 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6713 			    NULL) {
   6714 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6715 				    htole32(WTX_CMD_VLE);
   6716 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6717 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6718 			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   6720 			}
   6721 			dcmdlen = 0;
   6722 		} else {
   6723 			/* setup an advanced data descriptor */
   6724 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6725 			    htole64(dmamap->dm_segs[0].ds_addr);
   6726 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6727 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6729 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6730 			    htole32(fields);
   6731 			DPRINTF(WM_DEBUG_TX,
   6732 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6733 			    device_xname(sc->sc_dev), nexttx,
   6734 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6735 			DPRINTF(WM_DEBUG_TX,
   6736 			    ("\t 0x%08x%08x\n", fields,
   6737 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6738 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6739 		}
   6740 
   6741 		lasttx = nexttx;
   6742 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the remaining descriptors.  The legacy and
		 * advanced layouts coincide for these fields, and dcmdlen
		 * (0 for legacy, NQTX_DTYP_D | NQTX_CMD_DEXT for advanced)
		 * keeps each one in the same format as the first
		 * descriptor set up above.
		 */
   6747 		for (seg = 1; seg < dmamap->dm_nsegs;
   6748 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6749 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6750 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6751 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6752 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6753 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6754 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6755 			lasttx = nexttx;
   6756 
   6757 			DPRINTF(WM_DEBUG_TX,
   6758 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6759 			     "len %#04zx\n",
   6760 			    device_xname(sc->sc_dev), nexttx,
   6761 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6762 			    dmamap->dm_segs[seg].ds_len));
   6763 		}
   6764 
   6765 		KASSERT(lasttx != -1);
   6766 
		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.  EOP and RS sit at the same bit
		 * positions in the legacy and advanced layouts (the
		 * KASSERT below checks this), so the legacy-format
		 * write is safe for both.
		 */
   6772 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6773 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6774 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6775 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6776 
   6777 		txs->txs_lastdesc = lasttx;
   6778 
   6779 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6780 		    device_xname(sc->sc_dev),
   6781 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6782 
   6783 		/* Sync the descriptors we're using. */
   6784 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6785 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6786 
   6787 		/* Give the packet to the chip. */
   6788 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6789 		sent = true;
   6790 
   6791 		DPRINTF(WM_DEBUG_TX,
   6792 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6793 
   6794 		DPRINTF(WM_DEBUG_TX,
   6795 		    ("%s: TX: finished transmitting packet, job %d\n",
   6796 		    device_xname(sc->sc_dev), txq->txq_snext));
   6797 
   6798 		/* Advance the tx pointer. */
   6799 		txq->txq_free -= txs->txs_ndesc;
   6800 		txq->txq_next = nexttx;
   6801 
   6802 		txq->txq_sfree--;
   6803 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6804 
   6805 		/* Pass the packet to any BPF listeners. */
   6806 		bpf_mtap(ifp, m0);
   6807 	}
   6808 
   6809 	if (m0 != NULL) {
   6810 		ifp->if_flags |= IFF_OACTIVE;
   6811 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6812 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6813 			__func__));
   6814 		m_freem(m0);
   6815 	}
   6816 
   6817 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6818 		/* No more slots; notify upper layer. */
   6819 		ifp->if_flags |= IFF_OACTIVE;
   6820 	}
   6821 
   6822 	if (sent) {
   6823 		/* Set a watchdog timer in case the chip flakes out. */
   6824 		ifp->if_timer = 5;
   6825 	}
   6826 }
   6827 
   6828 /* Interrupt */
   6829 
   6830 /*
   6831  * wm_txeof:
   6832  *
   6833  *	Helper; handle transmit interrupts.
   6834  */
   6835 static int
   6836 wm_txeof(struct wm_softc *sc)
   6837 {
   6838 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6839 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6840 	struct wm_txsoft *txs;
   6841 	bool processed = false;
   6842 	int count = 0;
   6843 	int i;
   6844 	uint8_t status;
   6845 
   6846 	if (sc->sc_stopping)
   6847 		return 0;
   6848 
   6849 	ifp->if_flags &= ~IFF_OACTIVE;
   6850 
   6851 	/*
   6852 	 * Go through the Tx list and free mbufs for those
   6853 	 * frames which have been transmitted.
   6854 	 */
   6855 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   6856 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   6857 		txs = &txq->txq_soft[i];
   6858 
   6859 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   6860 			device_xname(sc->sc_dev), i));
   6861 
   6862 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   6863 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   6864 
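		/*
		 * The chip sets WTX_ST_DD in the last descriptor's
		 * status byte once it has written the job back.  If DD
		 * is still clear, this job is pending: return the
		 * descriptor to the PREREAD state and stop scanning.
		 */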
   6865 		status =
   6866 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   6867 		if ((status & WTX_ST_DD) == 0) {
   6868 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   6869 			    BUS_DMASYNC_PREREAD);
   6870 			break;
   6871 		}
   6872 
   6873 		processed = true;
   6874 		count++;
   6875 		DPRINTF(WM_DEBUG_TX,
   6876 		    ("%s: TX: job %d done: descs %d..%d\n",
   6877 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   6878 		    txs->txs_lastdesc));
   6879 
   6880 		/*
   6881 		 * XXX We should probably be using the statistics
   6882 		 * XXX registers, but I don't know if they exist
   6883 		 * XXX on chips before the i82544.
   6884 		 */
   6885 
   6886 #ifdef WM_EVENT_COUNTERS
   6887 		if (status & WTX_ST_TU)
   6888 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   6889 #endif /* WM_EVENT_COUNTERS */
   6890 
   6891 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   6892 			ifp->if_oerrors++;
   6893 			if (status & WTX_ST_LC)
   6894 				log(LOG_WARNING, "%s: late collision\n",
   6895 				    device_xname(sc->sc_dev));
   6896 			else if (status & WTX_ST_EC) {
   6897 				ifp->if_collisions += 16;
   6898 				log(LOG_WARNING, "%s: excessive collisions\n",
   6899 				    device_xname(sc->sc_dev));
   6900 			}
   6901 		} else
   6902 			ifp->if_opackets++;
   6903 
   6904 		txq->txq_free += txs->txs_ndesc;
   6905 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   6906 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   6907 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6908 		m_freem(txs->txs_mbuf);
   6909 		txs->txs_mbuf = NULL;
   6910 	}
   6911 
   6912 	/* Update the dirty transmit buffer pointer. */
   6913 	txq->txq_sdirty = i;
   6914 	DPRINTF(WM_DEBUG_TX,
   6915 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   6916 
   6917 	if (count != 0)
   6918 		rnd_add_uint32(&sc->rnd_source, count);
   6919 
   6920 	/*
   6921 	 * If there are no more pending transmissions, cancel the watchdog
   6922 	 * timer.
   6923 	 */
   6924 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   6925 		ifp->if_timer = 0;
   6926 
   6927 	return processed;
   6928 }
   6929 
   6930 /*
   6931  * wm_rxeof:
   6932  *
   6933  *	Helper; handle receive interrupts.
   6934  */
   6935 static void
   6936 wm_rxeof(struct wm_rxqueue *rxq)
   6937 {
   6938 	struct wm_softc *sc = rxq->rxq_sc;
   6939 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6940 	struct wm_rxsoft *rxs;
   6941 	struct mbuf *m;
   6942 	int i, len;
   6943 	int count = 0;
   6944 	uint8_t status, errors;
   6945 	uint16_t vlantag;
   6946 
   6947 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   6948 		rxs = &rxq->rxq_soft[i];
   6949 
   6950 		DPRINTF(WM_DEBUG_RX,
   6951 		    ("%s: RX: checking descriptor %d\n",
   6952 		    device_xname(sc->sc_dev), i));
   6953 
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   6955 
   6956 		status = rxq->rxq_descs[i].wrx_status;
   6957 		errors = rxq->rxq_descs[i].wrx_errors;
   6958 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   6959 		vlantag = rxq->rxq_descs[i].wrx_special;
   6960 
   6961 		if ((status & WRX_ST_DD) == 0) {
   6962 			/* We have processed all of the receive descriptors. */
   6963 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   6964 			break;
   6965 		}
   6966 
   6967 		count++;
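		/*
		 * rxq_discard is set when a receive buffer could not be
		 * replaced mid-packet; while it is set, every descriptor
		 * is recycled untouched until the EOP descriptor of the
		 * ruined frame clears it again.
		 */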
   6968 		if (__predict_false(rxq->rxq_discard)) {
   6969 			DPRINTF(WM_DEBUG_RX,
   6970 			    ("%s: RX: discarding contents of descriptor %d\n",
   6971 			    device_xname(sc->sc_dev), i));
   6972 			wm_init_rxdesc(rxq, i);
   6973 			if (status & WRX_ST_EOP) {
   6974 				/* Reset our state. */
   6975 				DPRINTF(WM_DEBUG_RX,
   6976 				    ("%s: RX: resetting rxdiscard -> 0\n",
   6977 				    device_xname(sc->sc_dev)));
   6978 				rxq->rxq_discard = 0;
   6979 			}
   6980 			continue;
   6981 		}
   6982 
   6983 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6984 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   6985 
   6986 		m = rxs->rxs_mbuf;
   6987 
   6988 		/*
   6989 		 * Add a new receive buffer to the ring, unless of
   6990 		 * course the length is zero. Treat the latter as a
   6991 		 * failed mapping.
   6992 		 */
   6993 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   6994 			/*
   6995 			 * Failed, throw away what we've done so
   6996 			 * far, and discard the rest of the packet.
   6997 			 */
   6998 			ifp->if_ierrors++;
   6999 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7000 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7001 			wm_init_rxdesc(rxq, i);
   7002 			if ((status & WRX_ST_EOP) == 0)
   7003 				rxq->rxq_discard = 1;
   7004 			if (rxq->rxq_head != NULL)
   7005 				m_freem(rxq->rxq_head);
   7006 			WM_RXCHAIN_RESET(rxq);
   7007 			DPRINTF(WM_DEBUG_RX,
   7008 			    ("%s: RX: Rx buffer allocation failed, "
   7009 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7010 			    rxq->rxq_discard ? " (discard)" : ""));
   7011 			continue;
   7012 		}
   7013 
   7014 		m->m_len = len;
   7015 		rxq->rxq_len += len;
   7016 		DPRINTF(WM_DEBUG_RX,
   7017 		    ("%s: RX: buffer at %p len %d\n",
   7018 		    device_xname(sc->sc_dev), m->m_data, len));
   7019 
   7020 		/* If this is not the end of the packet, keep looking. */
   7021 		if ((status & WRX_ST_EOP) == 0) {
   7022 			WM_RXCHAIN_LINK(rxq, m);
   7023 			DPRINTF(WM_DEBUG_RX,
   7024 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7025 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7026 			continue;
   7027 		}
   7028 
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on the I350,
		 * I354 and I21[01] (not all chips can be configured to
		 * strip it), so we need to trim it here.  If the last
		 * mbuf holds fewer than ETHER_CRC_LEN bytes, the
		 * remainder of the FCS must come off the previous mbuf
		 * in the chain.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so the FCS is
		 * never present there and must not be trimmed.
		 */
   7039 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7040 		    && (sc->sc_type != WM_T_I210)
   7041 		    && (sc->sc_type != WM_T_I211)) {
   7042 			if (m->m_len < ETHER_CRC_LEN) {
   7043 				rxq->rxq_tail->m_len
   7044 				    -= (ETHER_CRC_LEN - m->m_len);
   7045 				m->m_len = 0;
   7046 			} else
   7047 				m->m_len -= ETHER_CRC_LEN;
   7048 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7049 		} else
   7050 			len = rxq->rxq_len;
   7051 
   7052 		WM_RXCHAIN_LINK(rxq, m);
   7053 
   7054 		*rxq->rxq_tailp = NULL;
   7055 		m = rxq->rxq_head;
   7056 
   7057 		WM_RXCHAIN_RESET(rxq);
   7058 
   7059 		DPRINTF(WM_DEBUG_RX,
   7060 		    ("%s: RX: have entire packet, len -> %d\n",
   7061 		    device_xname(sc->sc_dev), len));
   7062 
   7063 		/* If an error occurred, update stats and drop the packet. */
   7064 		if (errors &
   7065 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7066 			if (errors & WRX_ER_SE)
   7067 				log(LOG_WARNING, "%s: symbol error\n",
   7068 				    device_xname(sc->sc_dev));
   7069 			else if (errors & WRX_ER_SEQ)
   7070 				log(LOG_WARNING, "%s: receive sequence error\n",
   7071 				    device_xname(sc->sc_dev));
   7072 			else if (errors & WRX_ER_CE)
   7073 				log(LOG_WARNING, "%s: CRC error\n",
   7074 				    device_xname(sc->sc_dev));
   7075 			m_freem(m);
   7076 			continue;
   7077 		}
   7078 
   7079 		/* No errors.  Receive the packet. */
   7080 		m->m_pkthdr.rcvif = ifp;
   7081 		m->m_pkthdr.len = len;
   7082 
   7083 		/*
   7084 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7085 		 * for us.  Associate the tag with the packet.
   7086 		 */
   7087 		/* XXXX should check for i350 and i354 */
   7088 		if ((status & WRX_ST_VP) != 0) {
   7089 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7090 		}
   7091 
   7092 		/* Set up checksum info for this packet. */
   7093 		if ((status & WRX_ST_IXSM) == 0) {
   7094 			if (status & WRX_ST_IPCS) {
   7095 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   7096 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7097 				if (errors & WRX_ER_IPE)
   7098 					m->m_pkthdr.csum_flags |=
   7099 					    M_CSUM_IPv4_BAD;
   7100 			}
   7101 			if (status & WRX_ST_TCPCS) {
   7102 				/*
   7103 				 * Note: we don't know if this was TCP or UDP,
   7104 				 * so we just set both bits, and expect the
   7105 				 * upper layers to deal.
   7106 				 */
   7107 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   7108 				m->m_pkthdr.csum_flags |=
   7109 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7110 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7111 				if (errors & WRX_ER_TCPE)
   7112 					m->m_pkthdr.csum_flags |=
   7113 					    M_CSUM_TCP_UDP_BAD;
   7114 			}
   7115 		}
   7116 
   7117 		ifp->if_ipackets++;
   7118 
   7119 		WM_RX_UNLOCK(rxq);
   7120 
   7121 		/* Pass this up to any BPF listeners. */
   7122 		bpf_mtap(ifp, m);
   7123 
   7124 		/* Pass it on. */
   7125 		if_percpuq_enqueue(sc->sc_ipq, m);
   7126 
   7127 		WM_RX_LOCK(rxq);
   7128 
   7129 		if (sc->sc_stopping)
   7130 			break;
   7131 	}
   7132 
   7133 	/* Update the receive pointer. */
   7134 	rxq->rxq_ptr = i;
   7135 	if (count != 0)
   7136 		rnd_add_uint32(&sc->rnd_source, count);
   7137 
   7138 	DPRINTF(WM_DEBUG_RX,
   7139 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7140 }
   7141 
   7142 /*
   7143  * wm_linkintr_gmii:
   7144  *
   7145  *	Helper; handle link interrupts for GMII.
   7146  */
   7147 static void
   7148 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7149 {
   7150 
   7151 	KASSERT(WM_CORE_LOCKED(sc));
   7152 
   7153 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7154 		__func__));
   7155 
   7156 	if (icr & ICR_LSC) {
   7157 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7158 
   7159 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7160 			wm_gig_downshift_workaround_ich8lan(sc);
   7161 
   7162 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7163 			device_xname(sc->sc_dev)));
   7164 		mii_pollstat(&sc->sc_mii);
   7165 		if (sc->sc_type == WM_T_82543) {
   7166 			int miistatus, active;
   7167 
   7168 			/*
   7169 			 * With 82543, we need to force speed and
   7170 			 * duplex on the MAC equal to what the PHY
   7171 			 * speed and duplex configuration is.
   7172 			 */
   7173 			miistatus = sc->sc_mii.mii_media_status;
   7174 
   7175 			if (miistatus & IFM_ACTIVE) {
   7176 				active = sc->sc_mii.mii_media_active;
   7177 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7178 				switch (IFM_SUBTYPE(active)) {
   7179 				case IFM_10_T:
   7180 					sc->sc_ctrl |= CTRL_SPEED_10;
   7181 					break;
   7182 				case IFM_100_TX:
   7183 					sc->sc_ctrl |= CTRL_SPEED_100;
   7184 					break;
   7185 				case IFM_1000_T:
   7186 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7187 					break;
   7188 				default:
   7189 					/*
   7190 					 * fiber?
					 * Should not enter here.
   7192 					 */
   7193 					printf("unknown media (%x)\n", active);
   7194 					break;
   7195 				}
   7196 				if (active & IFM_FDX)
   7197 					sc->sc_ctrl |= CTRL_FD;
   7198 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7199 			}
   7200 		} else if ((sc->sc_type == WM_T_ICH8)
   7201 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7202 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7203 		} else if (sc->sc_type == WM_T_PCH) {
   7204 			wm_k1_gig_workaround_hv(sc,
   7205 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7206 		}
   7207 
   7208 		if ((sc->sc_phytype == WMPHY_82578)
   7209 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7210 			== IFM_1000_T)) {
   7211 
   7212 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7213 				delay(200*1000); /* XXX too big */
   7214 
   7215 				/* Link stall fix for link up */
   7216 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7217 				    HV_MUX_DATA_CTRL,
   7218 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7219 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7220 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7221 				    HV_MUX_DATA_CTRL,
   7222 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7223 			}
   7224 		}
   7225 	} else if (icr & ICR_RXSEQ) {
   7226 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7227 			device_xname(sc->sc_dev)));
   7228 	}
   7229 }
   7230 
   7231 /*
   7232  * wm_linkintr_tbi:
   7233  *
   7234  *	Helper; handle link interrupts for TBI mode.
   7235  */
   7236 static void
   7237 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7238 {
   7239 	uint32_t status;
   7240 
   7241 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7242 		__func__));
   7243 
   7244 	status = CSR_READ(sc, WMREG_STATUS);
   7245 	if (icr & ICR_LSC) {
   7246 		if (status & STATUS_LU) {
   7247 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7248 			    device_xname(sc->sc_dev),
   7249 			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: the hardware updates TFCE and RFCE in
			 * CTRL automatically, so re-read CTRL into
			 * sc->sc_ctrl instead of relying on the cached
			 * value.
			 */
   7254 
   7255 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7256 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7257 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7258 			if (status & STATUS_FD)
   7259 				sc->sc_tctl |=
   7260 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7261 			else
   7262 				sc->sc_tctl |=
   7263 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7264 			if (sc->sc_ctrl & CTRL_TFCE)
   7265 				sc->sc_fcrtl |= FCRTL_XONE;
   7266 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7267 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7268 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7269 				      sc->sc_fcrtl);
   7270 			sc->sc_tbi_linkup = 1;
   7271 		} else {
   7272 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7273 			    device_xname(sc->sc_dev)));
   7274 			sc->sc_tbi_linkup = 0;
   7275 		}
   7276 		/* Update LED */
   7277 		wm_tbi_serdes_set_linkled(sc);
   7278 	} else if (icr & ICR_RXSEQ) {
   7279 		DPRINTF(WM_DEBUG_LINK,
   7280 		    ("%s: LINK: Receive sequence error\n",
   7281 		    device_xname(sc->sc_dev)));
   7282 	}
   7283 }
   7284 
   7285 /*
   7286  * wm_linkintr_serdes:
   7287  *
 *	Helper; handle link interrupts for SERDES mode.
   7289  */
   7290 static void
   7291 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7292 {
   7293 	struct mii_data *mii = &sc->sc_mii;
   7294 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7295 	uint32_t pcs_adv, pcs_lpab, reg;
   7296 
   7297 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7298 		__func__));
   7299 
   7300 	if (icr & ICR_LSC) {
   7301 		/* Check PCS */
   7302 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7303 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7304 			mii->mii_media_status |= IFM_ACTIVE;
   7305 			sc->sc_tbi_linkup = 1;
   7306 		} else {
   7307 			mii->mii_media_status |= IFM_NONE;
   7308 			sc->sc_tbi_linkup = 0;
   7309 			wm_tbi_serdes_set_linkled(sc);
   7310 			return;
   7311 		}
   7312 		mii->mii_media_active |= IFM_1000_SX;
   7313 		if ((reg & PCS_LSTS_FDX) != 0)
   7314 			mii->mii_media_active |= IFM_FDX;
   7315 		else
   7316 			mii->mii_media_active |= IFM_HDX;
   7317 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7318 			/* Check flow */
   7319 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7320 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7321 				DPRINTF(WM_DEBUG_LINK,
   7322 				    ("XXX LINKOK but not ACOMP\n"));
   7323 				return;
   7324 			}
   7325 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7326 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7327 			DPRINTF(WM_DEBUG_LINK,
   7328 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
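			/*
			 * Resolve flow control from the advertised and
			 * link-partner pause bits, in the spirit of
			 * IEEE 802.3 Annex 28B: symmetric pause on both
			 * sides enables pause in both directions, while
			 * the asymmetric-pause combinations below enable
			 * a single direction only.
			 */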
   7329 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7330 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7331 				mii->mii_media_active |= IFM_FLOW
   7332 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7333 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7334 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7335 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7336 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7337 				mii->mii_media_active |= IFM_FLOW
   7338 				    | IFM_ETH_TXPAUSE;
   7339 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7340 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7341 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7342 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7343 				mii->mii_media_active |= IFM_FLOW
   7344 				    | IFM_ETH_RXPAUSE;
   7345 		}
   7346 		/* Update LED */
   7347 		wm_tbi_serdes_set_linkled(sc);
   7348 	} else {
   7349 		DPRINTF(WM_DEBUG_LINK,
   7350 		    ("%s: LINK: Receive sequence error\n",
   7351 		    device_xname(sc->sc_dev)));
   7352 	}
   7353 }
   7354 
   7355 /*
   7356  * wm_linkintr:
   7357  *
   7358  *	Helper; handle link interrupts.
   7359  */
   7360 static void
   7361 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7362 {
   7363 
   7364 	KASSERT(WM_CORE_LOCKED(sc));
   7365 
   7366 	if (sc->sc_flags & WM_F_HAS_MII)
   7367 		wm_linkintr_gmii(sc, icr);
   7368 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7369 	    && (sc->sc_type >= WM_T_82575))
   7370 		wm_linkintr_serdes(sc, icr);
   7371 	else
   7372 		wm_linkintr_tbi(sc, icr);
   7373 }
   7374 
   7375 /*
   7376  * wm_intr_legacy:
   7377  *
   7378  *	Interrupt service routine for INTx and MSI.
   7379  */
   7380 static int
   7381 wm_intr_legacy(void *arg)
   7382 {
   7383 	struct wm_softc *sc = arg;
   7384 	struct wm_txqueue *txq = &sc->sc_txq[0];
   7385 	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
   7386 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7387 	uint32_t icr, rndval = 0;
   7388 	int handled = 0;
   7389 
   7390 	DPRINTF(WM_DEBUG_TX,
   7391 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
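	/*
	 * Reading ICR acknowledges (clears) the asserted interrupt
	 * causes, so each iteration services whatever was pending at
	 * the read; the loop exits once no cause we care about
	 * remains set.
	 */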
   7392 	while (1 /* CONSTCOND */) {
   7393 		icr = CSR_READ(sc, WMREG_ICR);
   7394 		if ((icr & sc->sc_icr) == 0)
   7395 			break;
   7396 		if (rndval == 0)
   7397 			rndval = icr;
   7398 
   7399 		WM_RX_LOCK(rxq);
   7400 
   7401 		if (sc->sc_stopping) {
   7402 			WM_RX_UNLOCK(rxq);
   7403 			break;
   7404 		}
   7405 
   7406 		handled = 1;
   7407 
   7408 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7409 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7410 			DPRINTF(WM_DEBUG_RX,
   7411 			    ("%s: RX: got Rx intr 0x%08x\n",
   7412 			    device_xname(sc->sc_dev),
   7413 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7414 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7415 		}
   7416 #endif
   7417 		wm_rxeof(rxq);
   7418 
   7419 		WM_RX_UNLOCK(rxq);
   7420 		WM_TX_LOCK(txq);
   7421 
   7422 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7423 		if (icr & ICR_TXDW) {
   7424 			DPRINTF(WM_DEBUG_TX,
   7425 			    ("%s: TX: got TXDW interrupt\n",
   7426 			    device_xname(sc->sc_dev)));
   7427 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7428 		}
   7429 #endif
   7430 		wm_txeof(sc);
   7431 
   7432 		WM_TX_UNLOCK(txq);
   7433 		WM_CORE_LOCK(sc);
   7434 
   7435 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7436 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7437 			wm_linkintr(sc, icr);
   7438 		}
   7439 
   7440 		WM_CORE_UNLOCK(sc);
   7441 
   7442 		if (icr & ICR_RXO) {
   7443 #if defined(WM_DEBUG)
   7444 			log(LOG_WARNING, "%s: Receive overrun\n",
   7445 			    device_xname(sc->sc_dev));
   7446 #endif /* defined(WM_DEBUG) */
   7447 		}
   7448 	}
   7449 
   7450 	rnd_add_uint32(&sc->rnd_source, rndval);
   7451 
   7452 	if (handled) {
   7453 		/* Try to get more packets going. */
   7454 		ifp->if_start(ifp);
   7455 	}
   7456 
   7457 	return handled;
   7458 }
   7459 
   7460 /*
   7461  * wm_txintr_msix:
   7462  *
   7463  *	Interrupt service routine for TX complete interrupt for MSI-X.
   7464  */
   7465 static int
   7466 wm_txintr_msix(void *arg)
   7467 {
   7468 	struct wm_txqueue *txq = arg;
   7469 	struct wm_softc *sc = txq->txq_sc;
   7470 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7471 	int handled = 0;
   7472 
   7473 	DPRINTF(WM_DEBUG_TX,
   7474 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7475 
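	/*
	 * Mask this queue's interrupt while it is being serviced and
	 * re-enable it on the way out.  The register pair differs by
	 * chip: IMC/IMS with ICR_TXQ() bits on the 82574, EIMC/EIMS
	 * with EITR_TX_QUEUE() bits on the 82575, and a plain vector
	 * bit in EIMC/EIMS on newer devices.
	 */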
   7476 	if (sc->sc_type == WM_T_82574)
   7477 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id));
   7478 	else if (sc->sc_type == WM_T_82575)
   7479 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
   7480 	else
   7481 		CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
   7482 
   7483 	WM_TX_LOCK(txq);
   7484 
   7485 	if (sc->sc_stopping)
   7486 		goto out;
   7487 
   7488 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7489 	handled = wm_txeof(sc);
   7490 
   7491 out:
   7492 	WM_TX_UNLOCK(txq);
   7493 
   7494 	if (sc->sc_type == WM_T_82574)
   7495 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id));
   7496 	else if (sc->sc_type == WM_T_82575)
   7497 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
   7498 	else
   7499 		CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
   7500 
   7501 	if (handled) {
   7502 		/* Try to get more packets going. */
   7503 		ifp->if_start(ifp);
   7504 	}
   7505 
   7506 	return handled;
   7507 }
   7508 
   7509 /*
   7510  * wm_rxintr_msix:
   7511  *
   7512  *	Interrupt service routine for RX interrupt for MSI-X.
   7513  */
   7514 static int
   7515 wm_rxintr_msix(void *arg)
   7516 {
   7517 	struct wm_rxqueue *rxq = arg;
   7518 	struct wm_softc *sc = rxq->rxq_sc;
   7519 
   7520 	DPRINTF(WM_DEBUG_RX,
   7521 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7522 
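	/*
	 * Same mask-while-servicing dance as in wm_txintr_msix(),
	 * using the Rx queue's cause bits.
	 */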
   7523 	if (sc->sc_type == WM_T_82574)
   7524 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id));
   7525 	else if (sc->sc_type == WM_T_82575)
   7526 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
   7527 	else
   7528 		CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
   7529 
   7530 	WM_RX_LOCK(rxq);
   7531 
   7532 	if (sc->sc_stopping)
   7533 		goto out;
   7534 
   7535 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7536 	wm_rxeof(rxq);
   7537 
   7538 out:
   7539 	WM_RX_UNLOCK(rxq);
   7540 
   7541 	if (sc->sc_type == WM_T_82574)
   7542 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
   7543 	else if (sc->sc_type == WM_T_82575)
   7544 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
   7545 	else
   7546 		CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
   7547 
   7548 	return 1;
   7549 }
   7550 
   7551 /*
   7552  * wm_linkintr_msix:
   7553  *
   7554  *	Interrupt service routine for link status change for MSI-X.
   7555  */
   7556 static int
   7557 wm_linkintr_msix(void *arg)
   7558 {
   7559 	struct wm_softc *sc = arg;
   7560 	uint32_t reg;
   7561 
   7562 	DPRINTF(WM_DEBUG_LINK,
   7563 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7564 
   7565 	reg = CSR_READ(sc, WMREG_ICR);
   7566 	WM_CORE_LOCK(sc);
   7567 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7568 		goto out;
   7569 
   7570 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7571 	wm_linkintr(sc, ICR_LSC);
   7572 
   7573 out:
   7574 	WM_CORE_UNLOCK(sc);
   7575 
   7576 	if (sc->sc_type == WM_T_82574)
   7577 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7578 	else if (sc->sc_type == WM_T_82575)
   7579 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7580 	else
   7581 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7582 
   7583 	return 1;
   7584 }
   7585 
   7586 /*
   7587  * Media related.
   7588  * GMII, SGMII, TBI (and SERDES)
   7589  */
   7590 
   7591 /* Common */
   7592 
   7593 /*
   7594  * wm_tbi_serdes_set_linkled:
   7595  *
   7596  *	Update the link LED on TBI and SERDES devices.
   7597  */
   7598 static void
   7599 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7600 {
   7601 
   7602 	if (sc->sc_tbi_linkup)
   7603 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7604 	else
   7605 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7606 
   7607 	/* 82540 or newer devices are active low */
   7608 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7609 
   7610 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7611 }
   7612 
   7613 /* GMII related */
   7614 
   7615 /*
   7616  * wm_gmii_reset:
   7617  *
   7618  *	Reset the PHY.
   7619  */
   7620 static void
   7621 wm_gmii_reset(struct wm_softc *sc)
   7622 {
   7623 	uint32_t reg;
   7624 	int rv;
   7625 
   7626 	/* get phy semaphore */
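	/*
	 * Which semaphore protects the PHY differs by family: the SWSM
	 * semaphore on 8257[1234]/82583, the per-function SW/FW
	 * semaphore on 8257[56]/82580/I35[04]/I21[01]/80003, and the
	 * SW/FW/HW semaphore on ICH/PCH parts, mirroring the switch
	 * below.
	 */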
   7627 	switch (sc->sc_type) {
   7628 	case WM_T_82571:
   7629 	case WM_T_82572:
   7630 	case WM_T_82573:
   7631 	case WM_T_82574:
   7632 	case WM_T_82583:
   7633 		 /* XXX should get sw semaphore, too */
   7634 		rv = wm_get_swsm_semaphore(sc);
   7635 		break;
   7636 	case WM_T_82575:
   7637 	case WM_T_82576:
   7638 	case WM_T_82580:
   7639 	case WM_T_I350:
   7640 	case WM_T_I354:
   7641 	case WM_T_I210:
   7642 	case WM_T_I211:
   7643 	case WM_T_80003:
   7644 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7645 		break;
   7646 	case WM_T_ICH8:
   7647 	case WM_T_ICH9:
   7648 	case WM_T_ICH10:
   7649 	case WM_T_PCH:
   7650 	case WM_T_PCH2:
   7651 	case WM_T_PCH_LPT:
   7652 		rv = wm_get_swfwhw_semaphore(sc);
   7653 		break;
   7654 	default:
		/* Nothing to do */
   7656 		rv = 0;
   7657 		break;
   7658 	}
   7659 	if (rv != 0) {
   7660 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7661 		    __func__);
   7662 		return;
   7663 	}
   7664 
   7665 	switch (sc->sc_type) {
   7666 	case WM_T_82542_2_0:
   7667 	case WM_T_82542_2_1:
   7668 		/* null */
   7669 		break;
   7670 	case WM_T_82543:
   7671 		/*
   7672 		 * With 82543, we need to force speed and duplex on the MAC
   7673 		 * equal to what the PHY speed and duplex configuration is.
   7674 		 * In addition, we need to perform a hardware reset on the PHY
   7675 		 * to take it out of reset.
   7676 		 */
   7677 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7678 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7679 
   7680 		/* The PHY reset pin is active-low. */
   7681 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7682 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7683 		    CTRL_EXT_SWDPIN(4));
   7684 		reg |= CTRL_EXT_SWDPIO(4);
   7685 
   7686 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7687 		CSR_WRITE_FLUSH(sc);
   7688 		delay(10*1000);
   7689 
   7690 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7691 		CSR_WRITE_FLUSH(sc);
   7692 		delay(150);
   7693 #if 0
   7694 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7695 #endif
   7696 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7697 		break;
   7698 	case WM_T_82544:	/* reset 10000us */
   7699 	case WM_T_82540:
   7700 	case WM_T_82545:
   7701 	case WM_T_82545_3:
   7702 	case WM_T_82546:
   7703 	case WM_T_82546_3:
   7704 	case WM_T_82541:
   7705 	case WM_T_82541_2:
   7706 	case WM_T_82547:
   7707 	case WM_T_82547_2:
   7708 	case WM_T_82571:	/* reset 100us */
   7709 	case WM_T_82572:
   7710 	case WM_T_82573:
   7711 	case WM_T_82574:
   7712 	case WM_T_82575:
   7713 	case WM_T_82576:
   7714 	case WM_T_82580:
   7715 	case WM_T_I350:
   7716 	case WM_T_I354:
   7717 	case WM_T_I210:
   7718 	case WM_T_I211:
   7719 	case WM_T_82583:
   7720 	case WM_T_80003:
   7721 		/* generic reset */
   7722 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7723 		CSR_WRITE_FLUSH(sc);
   7724 		delay(20000);
   7725 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7726 		CSR_WRITE_FLUSH(sc);
   7727 		delay(20000);
   7728 
   7729 		if ((sc->sc_type == WM_T_82541)
   7730 		    || (sc->sc_type == WM_T_82541_2)
   7731 		    || (sc->sc_type == WM_T_82547)
   7732 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   7734 			/* XXX add code to set LED after phy reset */
   7735 		}
   7736 		break;
   7737 	case WM_T_ICH8:
   7738 	case WM_T_ICH9:
   7739 	case WM_T_ICH10:
   7740 	case WM_T_PCH:
   7741 	case WM_T_PCH2:
   7742 	case WM_T_PCH_LPT:
   7743 		/* generic reset */
   7744 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7745 		CSR_WRITE_FLUSH(sc);
   7746 		delay(100);
   7747 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7748 		CSR_WRITE_FLUSH(sc);
   7749 		delay(150);
   7750 		break;
   7751 	default:
   7752 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7753 		    __func__);
   7754 		break;
   7755 	}
   7756 
   7757 	/* release PHY semaphore */
   7758 	switch (sc->sc_type) {
   7759 	case WM_T_82571:
   7760 	case WM_T_82572:
   7761 	case WM_T_82573:
   7762 	case WM_T_82574:
   7763 	case WM_T_82583:
   7764 		 /* XXX should put sw semaphore, too */
   7765 		wm_put_swsm_semaphore(sc);
   7766 		break;
   7767 	case WM_T_82575:
   7768 	case WM_T_82576:
   7769 	case WM_T_82580:
   7770 	case WM_T_I350:
   7771 	case WM_T_I354:
   7772 	case WM_T_I210:
   7773 	case WM_T_I211:
   7774 	case WM_T_80003:
   7775 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7776 		break;
   7777 	case WM_T_ICH8:
   7778 	case WM_T_ICH9:
   7779 	case WM_T_ICH10:
   7780 	case WM_T_PCH:
   7781 	case WM_T_PCH2:
   7782 	case WM_T_PCH_LPT:
   7783 		wm_put_swfwhw_semaphore(sc);
   7784 		break;
   7785 	default:
		/* Nothing to do */
   7788 		break;
   7789 	}
   7790 
   7791 	/* get_cfg_done */
   7792 	wm_get_cfg_done(sc);
   7793 
   7794 	/* extra setup */
   7795 	switch (sc->sc_type) {
   7796 	case WM_T_82542_2_0:
   7797 	case WM_T_82542_2_1:
   7798 	case WM_T_82543:
   7799 	case WM_T_82544:
   7800 	case WM_T_82540:
   7801 	case WM_T_82545:
   7802 	case WM_T_82545_3:
   7803 	case WM_T_82546:
   7804 	case WM_T_82546_3:
   7805 	case WM_T_82541_2:
   7806 	case WM_T_82547_2:
   7807 	case WM_T_82571:
   7808 	case WM_T_82572:
   7809 	case WM_T_82573:
   7810 	case WM_T_82575:
   7811 	case WM_T_82576:
   7812 	case WM_T_82580:
   7813 	case WM_T_I350:
   7814 	case WM_T_I354:
   7815 	case WM_T_I210:
   7816 	case WM_T_I211:
   7817 	case WM_T_80003:
   7818 		/* null */
   7819 		break;
   7820 	case WM_T_82574:
   7821 	case WM_T_82583:
   7822 		wm_lplu_d0_disable(sc);
   7823 		break;
   7824 	case WM_T_82541:
   7825 	case WM_T_82547:
   7826 		/* XXX Configure actively LED after PHY reset */
   7827 		break;
   7828 	case WM_T_ICH8:
   7829 	case WM_T_ICH9:
   7830 	case WM_T_ICH10:
   7831 	case WM_T_PCH:
   7832 	case WM_T_PCH2:
   7833 	case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
   7835 		delay(10*1000);
   7836 
   7837 		if (sc->sc_type == WM_T_PCH)
   7838 			wm_hv_phy_workaround_ich8lan(sc);
   7839 
   7840 		if (sc->sc_type == WM_T_PCH2)
   7841 			wm_lv_phy_workaround_ich8lan(sc);
   7842 
   7843 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7844 			/*
   7845 			 * dummy read to clear the phy wakeup bit after lcd
   7846 			 * reset
   7847 			 */
   7848 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7849 		}
   7850 
   7851 		/*
		 * XXX Configure the LCD with the extended configuration region
   7853 		 * in NVM
   7854 		 */
   7855 
   7856 		/* Disable D0 LPLU. */
   7857 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   7858 			wm_lplu_d0_disable_pch(sc);
   7859 		else
   7860 			wm_lplu_d0_disable(sc);	/* ICH* */
   7861 		break;
   7862 	default:
   7863 		panic("%s: unknown type\n", __func__);
   7864 		break;
   7865 	}
   7866 }
   7867 
   7868 /*
   7869  * wm_get_phy_id_82575:
   7870  *
   7871  * Return PHY ID. Return -1 if it failed.
   7872  */
   7873 static int
   7874 wm_get_phy_id_82575(struct wm_softc *sc)
   7875 {
   7876 	uint32_t reg;
   7877 	int phyid = -1;
   7878 
   7879 	/* XXX */
   7880 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   7881 		return -1;
   7882 
   7883 	if (wm_sgmii_uses_mdio(sc)) {
   7884 		switch (sc->sc_type) {
   7885 		case WM_T_82575:
   7886 		case WM_T_82576:
   7887 			reg = CSR_READ(sc, WMREG_MDIC);
   7888 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   7889 			break;
   7890 		case WM_T_82580:
   7891 		case WM_T_I350:
   7892 		case WM_T_I354:
   7893 		case WM_T_I210:
   7894 		case WM_T_I211:
   7895 			reg = CSR_READ(sc, WMREG_MDICNFG);
   7896 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   7897 			break;
   7898 		default:
   7899 			return -1;
   7900 		}
   7901 	}
   7902 
   7903 	return phyid;
   7904 }
   7905 
   7906 
   7907 /*
   7908  * wm_gmii_mediainit:
   7909  *
   7910  *	Initialize media for use on 1000BASE-T devices.
   7911  */
   7912 static void
   7913 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   7914 {
   7915 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7916 	struct mii_data *mii = &sc->sc_mii;
   7917 	uint32_t reg;
   7918 
   7919 	/* We have GMII. */
   7920 	sc->sc_flags |= WM_F_HAS_MII;
   7921 
   7922 	if (sc->sc_type == WM_T_80003)
   7923 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   7924 	else
   7925 		sc->sc_tipg = TIPG_1000T_DFLT;
   7926 
   7927 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   7928 	if ((sc->sc_type == WM_T_82580)
   7929 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   7930 	    || (sc->sc_type == WM_T_I211)) {
   7931 		reg = CSR_READ(sc, WMREG_PHPM);
   7932 		reg &= ~PHPM_GO_LINK_D;
   7933 		CSR_WRITE(sc, WMREG_PHPM, reg);
   7934 	}
   7935 
   7936 	/*
   7937 	 * Let the chip set speed/duplex on its own based on
   7938 	 * signals from the PHY.
   7939 	 * XXXbouyer - I'm not sure this is right for the 80003,
   7940 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   7941 	 */
   7942 	sc->sc_ctrl |= CTRL_SLU;
   7943 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7944 
   7945 	/* Initialize our media structures and probe the GMII. */
   7946 	mii->mii_ifp = ifp;
   7947 
	/*
	 * Determine the PHY access method.
	 *
	 *  For SGMII, use the SGMII-specific method.
	 *
	 *  For some devices, the PHY access method can be determined
	 * from sc_type alone.
	 *
	 *  For ICH and PCH variants, sc_type is not enough to tell the
	 * PHY access method apart, so the PCI product ID is used for
	 * some devices.
	 *  For other ICH8 variants, try igp's method first; if the PHY
	 * can't be detected that way, fall back to bm's method.
	 */
   7962 	switch (prodid) {
   7963 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   7964 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   7965 		/* 82577 */
   7966 		sc->sc_phytype = WMPHY_82577;
   7967 		break;
   7968 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   7969 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   7970 		/* 82578 */
   7971 		sc->sc_phytype = WMPHY_82578;
   7972 		break;
   7973 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   7974 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   7975 		/* 82579 */
   7976 		sc->sc_phytype = WMPHY_82579;
   7977 		break;
   7978 	case PCI_PRODUCT_INTEL_82801I_BM:
   7979 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   7980 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   7981 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   7982 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   7983 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   7984 		/* 82567 */
   7985 		sc->sc_phytype = WMPHY_BM;
   7986 		mii->mii_readreg = wm_gmii_bm_readreg;
   7987 		mii->mii_writereg = wm_gmii_bm_writereg;
   7988 		break;
   7989 	default:
   7990 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   7991 		    && !wm_sgmii_uses_mdio(sc)){
   7992 			/* SGMII */
   7993 			mii->mii_readreg = wm_sgmii_readreg;
   7994 			mii->mii_writereg = wm_sgmii_writereg;
   7995 		} else if (sc->sc_type >= WM_T_80003) {
   7996 			/* 80003 */
   7997 			mii->mii_readreg = wm_gmii_i80003_readreg;
   7998 			mii->mii_writereg = wm_gmii_i80003_writereg;
   7999 		} else if (sc->sc_type >= WM_T_I210) {
   8000 			/* I210 and I211 */
   8001 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8002 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8003 		} else if (sc->sc_type >= WM_T_82580) {
   8004 			/* 82580, I350 and I354 */
   8005 			sc->sc_phytype = WMPHY_82580;
   8006 			mii->mii_readreg = wm_gmii_82580_readreg;
   8007 			mii->mii_writereg = wm_gmii_82580_writereg;
   8008 		} else if (sc->sc_type >= WM_T_82544) {
			/* 82544, 82540, 8254[56], 8254[17], 8257[1234], 82583 */
   8010 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8011 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8012 		} else {
   8013 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8014 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8015 		}
   8016 		break;
   8017 	}
   8018 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
   8019 		/* All PCH* use _hv_ */
   8020 		mii->mii_readreg = wm_gmii_hv_readreg;
   8021 		mii->mii_writereg = wm_gmii_hv_writereg;
   8022 	}
   8023 	mii->mii_statchg = wm_gmii_statchg;
   8024 
   8025 	wm_gmii_reset(sc);
   8026 
   8027 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8028 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8029 	    wm_gmii_mediastatus);
   8030 
   8031 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8032 	    || (sc->sc_type == WM_T_82580)
   8033 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8034 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8035 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8036 			/* Attach only one port */
   8037 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8038 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8039 		} else {
   8040 			int i, id;
   8041 			uint32_t ctrl_ext;
   8042 
   8043 			id = wm_get_phy_id_82575(sc);
   8044 			if (id != -1) {
   8045 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8046 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8047 			}
   8048 			if ((id == -1)
   8049 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    8050 				/* Power on the SGMII PHY if it is disabled */
   8051 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8052 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8053 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8054 				CSR_WRITE_FLUSH(sc);
   8055 				delay(300*1000); /* XXX too long */
   8056 
   8057 				/* from 1 to 8 */
   8058 				for (i = 1; i < 8; i++)
   8059 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8060 					    0xffffffff, i, MII_OFFSET_ANY,
   8061 					    MIIF_DOPAUSE);
   8062 
    8063 				/* Restore the previous SFP cage power state */
   8064 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8065 			}
   8066 		}
   8067 	} else {
   8068 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8069 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8070 	}
   8071 
   8072 	/*
   8073 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8074 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8075 	 */
   8076 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8077 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8078 		wm_set_mdio_slow_mode_hv(sc);
   8079 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8080 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8081 	}
   8082 
   8083 	/*
   8084 	 * (For ICH8 variants)
   8085 	 * If PHY detection failed, use BM's r/w function and retry.
   8086 	 */
   8087 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8088 		/* if failed, retry with *_bm_* */
   8089 		mii->mii_readreg = wm_gmii_bm_readreg;
   8090 		mii->mii_writereg = wm_gmii_bm_writereg;
   8091 
   8092 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8093 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8094 	}
   8095 
   8096 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8097 		/* No PHY was found */
   8098 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8099 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8100 		sc->sc_phytype = WMPHY_NONE;
   8101 	} else {
   8102 		/*
   8103 		 * PHY Found!
   8104 		 * Check PHY type.
   8105 		 */
   8106 		uint32_t model;
   8107 		struct mii_softc *child;
   8108 
   8109 		child = LIST_FIRST(&mii->mii_phys);
   8110 		model = child->mii_mpd_model;
   8111 		if (model == MII_MODEL_yyINTEL_I82566)
   8112 			sc->sc_phytype = WMPHY_IGP_3;
   8113 
   8114 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8115 	}
   8116 }
   8117 
   8118 /*
   8119  * wm_gmii_mediachange:	[ifmedia interface function]
   8120  *
   8121  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8122  */
   8123 static int
   8124 wm_gmii_mediachange(struct ifnet *ifp)
   8125 {
   8126 	struct wm_softc *sc = ifp->if_softc;
   8127 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8128 	int rc;
   8129 
   8130 	if ((ifp->if_flags & IFF_UP) == 0)
   8131 		return 0;
   8132 
   8133 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8134 	sc->sc_ctrl |= CTRL_SLU;
   8135 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8136 	    || (sc->sc_type > WM_T_82543)) {
   8137 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8138 	} else {
   8139 		sc->sc_ctrl &= ~CTRL_ASDE;
   8140 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8141 		if (ife->ifm_media & IFM_FDX)
   8142 			sc->sc_ctrl |= CTRL_FD;
   8143 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8144 		case IFM_10_T:
   8145 			sc->sc_ctrl |= CTRL_SPEED_10;
   8146 			break;
   8147 		case IFM_100_TX:
   8148 			sc->sc_ctrl |= CTRL_SPEED_100;
   8149 			break;
   8150 		case IFM_1000_T:
   8151 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8152 			break;
   8153 		default:
   8154 			panic("wm_gmii_mediachange: bad media 0x%x",
   8155 			    ife->ifm_media);
   8156 		}
   8157 	}
   8158 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8159 	if (sc->sc_type <= WM_T_82543)
   8160 		wm_gmii_reset(sc);
   8161 
   8162 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8163 		return 0;
   8164 	return rc;
   8165 }
   8166 
   8167 /*
   8168  * wm_gmii_mediastatus:	[ifmedia interface function]
   8169  *
   8170  *	Get the current interface media status on a 1000BASE-T device.
   8171  */
   8172 static void
   8173 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8174 {
   8175 	struct wm_softc *sc = ifp->if_softc;
   8176 
   8177 	ether_mediastatus(ifp, ifmr);
   8178 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8179 	    | sc->sc_flowflags;
   8180 }
   8181 
   8182 #define	MDI_IO		CTRL_SWDPIN(2)
   8183 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8184 #define	MDI_CLK		CTRL_SWDPIN(3)
   8185 
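/*
 * The i82543 bit-bangs the MII over software-definable pins: each
 * bit is presented on MDI_IO and latched into the PHY by pulsing
 * MDI_CLK, with delay(10) between edges.  wm_i82543_mii_sendbits()
 * shifts bits out MSB first; wm_i82543_mii_recvbits() clocks 16
 * data bits back in the same way.
 */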
   8186 static void
   8187 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8188 {
   8189 	uint32_t i, v;
   8190 
   8191 	v = CSR_READ(sc, WMREG_CTRL);
   8192 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8193 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8194 
   8195 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8196 		if (data & i)
   8197 			v |= MDI_IO;
   8198 		else
   8199 			v &= ~MDI_IO;
   8200 		CSR_WRITE(sc, WMREG_CTRL, v);
   8201 		CSR_WRITE_FLUSH(sc);
   8202 		delay(10);
   8203 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8204 		CSR_WRITE_FLUSH(sc);
   8205 		delay(10);
   8206 		CSR_WRITE(sc, WMREG_CTRL, v);
   8207 		CSR_WRITE_FLUSH(sc);
   8208 		delay(10);
   8209 	}
   8210 }
   8211 
   8212 static uint32_t
   8213 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8214 {
   8215 	uint32_t v, i, data = 0;
   8216 
   8217 	v = CSR_READ(sc, WMREG_CTRL);
   8218 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8219 	v |= CTRL_SWDPIO(3);
   8220 
   8221 	CSR_WRITE(sc, WMREG_CTRL, v);
   8222 	CSR_WRITE_FLUSH(sc);
   8223 	delay(10);
   8224 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8225 	CSR_WRITE_FLUSH(sc);
   8226 	delay(10);
   8227 	CSR_WRITE(sc, WMREG_CTRL, v);
   8228 	CSR_WRITE_FLUSH(sc);
   8229 	delay(10);
   8230 
   8231 	for (i = 0; i < 16; i++) {
   8232 		data <<= 1;
   8233 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8234 		CSR_WRITE_FLUSH(sc);
   8235 		delay(10);
   8236 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8237 			data |= 1;
   8238 		CSR_WRITE(sc, WMREG_CTRL, v);
   8239 		CSR_WRITE_FLUSH(sc);
   8240 		delay(10);
   8241 	}
   8242 
   8243 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8244 	CSR_WRITE_FLUSH(sc);
   8245 	delay(10);
   8246 	CSR_WRITE(sc, WMREG_CTRL, v);
   8247 	CSR_WRITE_FLUSH(sc);
   8248 	delay(10);
   8249 
   8250 	return data;
   8251 }
   8252 
   8253 #undef MDI_IO
   8254 #undef MDI_DIR
   8255 #undef MDI_CLK
   8256 
   8257 /*
   8258  * wm_gmii_i82543_readreg:	[mii interface function]
   8259  *
   8260  *	Read a PHY register on the GMII (i82543 version).
   8261  */
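/*
 * A sketch of the frame assembled below: after a 32-bit preamble of
 * ones, a 14-bit command word is shifted out with the start code in
 * bits 13:12, the read opcode in bits 11:10, the PHY address in bits
 * 9:5 and the register number in bits 4:0; the 16 data bits then
 * come back via wm_i82543_mii_recvbits().
 */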
   8262 static int
   8263 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8264 {
   8265 	struct wm_softc *sc = device_private(self);
   8266 	int rv;
   8267 
   8268 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8269 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8270 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8271 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8272 
   8273 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8274 	    device_xname(sc->sc_dev), phy, reg, rv));
   8275 
   8276 	return rv;
   8277 }
   8278 
   8279 /*
   8280  * wm_gmii_i82543_writereg:	[mii interface function]
   8281  *
   8282  *	Write a PHY register on the GMII (i82543 version).
   8283  */
   8284 static void
   8285 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8286 {
   8287 	struct wm_softc *sc = device_private(self);
   8288 
   8289 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8290 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8291 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8292 	    (MII_COMMAND_START << 30), 32);
   8293 }
   8294 
   8295 /*
   8296  * wm_gmii_i82544_readreg:	[mii interface function]
   8297  *
   8298  *	Read a PHY register on the GMII.
   8299  */
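/*
 * The MDIC register carries the whole transaction: the opcode, PHY
 * address and register address are written in one shot, then the
 * MDIC_READY bit is polled (up to WM_GEN_POLL_TIMEOUT * 3 iterations
 * with 50us between polls below) and the 16-bit result is extracted
 * with MDIC_DATA().
 */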
   8300 static int
   8301 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8302 {
   8303 	struct wm_softc *sc = device_private(self);
   8304 	uint32_t mdic = 0;
   8305 	int i, rv;
   8306 
   8307 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8308 	    MDIC_REGADD(reg));
   8309 
   8310 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8311 		mdic = CSR_READ(sc, WMREG_MDIC);
   8312 		if (mdic & MDIC_READY)
   8313 			break;
   8314 		delay(50);
   8315 	}
   8316 
   8317 	if ((mdic & MDIC_READY) == 0) {
   8318 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8319 		    device_xname(sc->sc_dev), phy, reg);
   8320 		rv = 0;
   8321 	} else if (mdic & MDIC_E) {
   8322 #if 0 /* This is normal if no PHY is present. */
   8323 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8324 		    device_xname(sc->sc_dev), phy, reg);
   8325 #endif
   8326 		rv = 0;
   8327 	} else {
   8328 		rv = MDIC_DATA(mdic);
   8329 		if (rv == 0xffff)
   8330 			rv = 0;
   8331 	}
   8332 
   8333 	return rv;
   8334 }
   8335 
   8336 /*
   8337  * wm_gmii_i82544_writereg:	[mii interface function]
   8338  *
   8339  *	Write a PHY register on the GMII.
   8340  */
   8341 static void
   8342 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8343 {
   8344 	struct wm_softc *sc = device_private(self);
   8345 	uint32_t mdic = 0;
   8346 	int i;
   8347 
   8348 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8349 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8350 
   8351 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8352 		mdic = CSR_READ(sc, WMREG_MDIC);
   8353 		if (mdic & MDIC_READY)
   8354 			break;
   8355 		delay(50);
   8356 	}
   8357 
   8358 	if ((mdic & MDIC_READY) == 0)
   8359 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8360 		    device_xname(sc->sc_dev), phy, reg);
   8361 	else if (mdic & MDIC_E)
   8362 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8363 		    device_xname(sc->sc_dev), phy, reg);
   8364 }
   8365 
   8366 /*
   8367  * wm_gmii_i80003_readreg:	[mii interface function]
   8368  *
    8369  *	Read a PHY register on the kumeran interface (80003).
    8370  * This could be handled by the PHY layer if we didn't have to lock
    8371  * the resource ...
   8372  */
   8373 static int
   8374 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8375 {
   8376 	struct wm_softc *sc = device_private(self);
   8377 	int sem;
   8378 	int rv;
   8379 
   8380 	if (phy != 1) /* only one PHY on kumeran bus */
   8381 		return 0;
   8382 
   8383 	sem = swfwphysem[sc->sc_funcid];
   8384 	if (wm_get_swfw_semaphore(sc, sem)) {
   8385 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8386 		    __func__);
   8387 		return 0;
   8388 	}
   8389 
   8390 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8391 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8392 		    reg >> GG82563_PAGE_SHIFT);
   8393 	} else {
   8394 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8395 		    reg >> GG82563_PAGE_SHIFT);
   8396 	}
    8397 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8398 	delay(200);
   8399 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8400 	delay(200);
   8401 
   8402 	wm_put_swfw_semaphore(sc, sem);
   8403 	return rv;
   8404 }
   8405 
   8406 /*
   8407  * wm_gmii_i80003_writereg:	[mii interface function]
   8408  *
    8409  *	Write a PHY register on the kumeran interface (80003).
    8410  * This could be handled by the PHY layer if we didn't have to lock
    8411  * the resource ...
   8412  */
   8413 static void
   8414 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8415 {
   8416 	struct wm_softc *sc = device_private(self);
   8417 	int sem;
   8418 
   8419 	if (phy != 1) /* only one PHY on kumeran bus */
   8420 		return;
   8421 
   8422 	sem = swfwphysem[sc->sc_funcid];
   8423 	if (wm_get_swfw_semaphore(sc, sem)) {
   8424 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8425 		    __func__);
   8426 		return;
   8427 	}
   8428 
   8429 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8430 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8431 		    reg >> GG82563_PAGE_SHIFT);
   8432 	} else {
   8433 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8434 		    reg >> GG82563_PAGE_SHIFT);
   8435 	}
    8436 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8437 	delay(200);
   8438 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8439 	delay(200);
   8440 
   8441 	wm_put_swfw_semaphore(sc, sem);
   8442 }
   8443 
   8444 /*
   8445  * wm_gmii_bm_readreg:	[mii interface function]
   8446  *
    8447  *	Read a PHY register on the BM PHY (82567).
    8448  * This could be handled by the PHY layer if we didn't have to lock
    8449  * the resource ...
   8450  */
   8451 static int
   8452 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8453 {
   8454 	struct wm_softc *sc = device_private(self);
   8455 	int sem;
   8456 	int rv;
   8457 
   8458 	sem = swfwphysem[sc->sc_funcid];
   8459 	if (wm_get_swfw_semaphore(sc, sem)) {
   8460 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8461 		    __func__);
   8462 		return 0;
   8463 	}
   8464 
   8465 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8466 		if (phy == 1)
   8467 			wm_gmii_i82544_writereg(self, phy,
   8468 			    MII_IGPHY_PAGE_SELECT, reg);
   8469 		else
   8470 			wm_gmii_i82544_writereg(self, phy,
   8471 			    GG82563_PHY_PAGE_SELECT,
   8472 			    reg >> GG82563_PAGE_SHIFT);
   8473 	}
   8474 
   8475 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8476 	wm_put_swfw_semaphore(sc, sem);
   8477 	return rv;
   8478 }
   8479 
   8480 /*
   8481  * wm_gmii_bm_writereg:	[mii interface function]
   8482  *
    8483  *	Write a PHY register on the BM PHY (82567).
    8484  * This could be handled by the PHY layer if we didn't have to lock
    8485  * the resource ...
   8486  */
   8487 static void
   8488 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8489 {
   8490 	struct wm_softc *sc = device_private(self);
   8491 	int sem;
   8492 
   8493 	sem = swfwphysem[sc->sc_funcid];
   8494 	if (wm_get_swfw_semaphore(sc, sem)) {
   8495 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8496 		    __func__);
   8497 		return;
   8498 	}
   8499 
   8500 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8501 		if (phy == 1)
   8502 			wm_gmii_i82544_writereg(self, phy,
   8503 			    MII_IGPHY_PAGE_SELECT, reg);
   8504 		else
   8505 			wm_gmii_i82544_writereg(self, phy,
   8506 			    GG82563_PHY_PAGE_SELECT,
   8507 			    reg >> GG82563_PAGE_SHIFT);
   8508 	}
   8509 
   8510 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8511 	wm_put_swfw_semaphore(sc, sem);
   8512 }
   8513 
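/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register.  Registers on
 *	page 800 can't be reached directly: the code below selects page
 *	769 and sets the WUC enable bit, selects page 800, writes the
 *	register number to the address opcode and accesses the data
 *	opcode, and finally restores the original page 769 enable state.
 */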
   8514 static void
   8515 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8516 {
   8517 	struct wm_softc *sc = device_private(self);
   8518 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8519 	uint16_t wuce;
   8520 
   8521 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8522 	if (sc->sc_type == WM_T_PCH) {
    8523 		/* XXX The e1000 driver does nothing here... why? */
   8524 	}
   8525 
   8526 	/* Set page 769 */
   8527 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8528 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8529 
   8530 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8531 
   8532 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8533 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8534 	    wuce | BM_WUC_ENABLE_BIT);
   8535 
   8536 	/* Select page 800 */
   8537 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8538 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8539 
   8540 	/* Write page 800 */
   8541 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8542 
   8543 	if (rd)
   8544 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8545 	else
   8546 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8547 
   8548 	/* Set page 769 */
   8549 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8550 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8551 
   8552 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8553 }
   8554 
   8555 /*
   8556  * wm_gmii_hv_readreg:	[mii interface function]
   8557  *
    8558  *	Read a PHY register on the HV (PCH) PHY.
    8559  * This could be handled by the PHY layer if we didn't have to lock
    8560  * the resource ...
   8561  */
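/*
 * For the HV PHY the reg argument packs both a page number and a
 * register number; BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() below
 * unpack them, and registers above BME1000_MAX_MULTI_PAGE_REG need
 * an explicit page select write first.
 */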
   8562 static int
   8563 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8564 {
   8565 	struct wm_softc *sc = device_private(self);
   8566 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8567 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8568 	uint16_t val;
   8569 	int rv;
   8570 
   8571 	if (wm_get_swfwhw_semaphore(sc)) {
   8572 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8573 		    __func__);
   8574 		return 0;
   8575 	}
   8576 
   8577 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8578 	if (sc->sc_phytype == WMPHY_82577) {
   8579 		/* XXX must write */
   8580 	}
   8581 
   8582 	/* Page 800 works differently than the rest so it has its own func */
   8583 	if (page == BM_WUC_PAGE) {
   8584 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return val;
   8587 
   8588 	/*
    8589 	 * Pages lower than 768 work differently than the rest, so they
    8590 	 * would need their own function; not handled here
   8591 	 */
   8592 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return 0;
   8595 	}
   8596 
   8597 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8598 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8599 		    page << BME1000_PAGE_SHIFT);
   8600 	}
   8601 
   8602 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8603 	wm_put_swfwhw_semaphore(sc);
   8604 	return rv;
   8605 }
   8606 
   8607 /*
   8608  * wm_gmii_hv_writereg:	[mii interface function]
   8609  *
    8610  *	Write a PHY register on the HV (PCH) PHY.
    8611  * This could be handled by the PHY layer if we didn't have to lock
    8612  * the resource ...
   8613  */
   8614 static void
   8615 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8616 {
   8617 	struct wm_softc *sc = device_private(self);
   8618 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8619 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8620 
   8621 	if (wm_get_swfwhw_semaphore(sc)) {
   8622 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8623 		    __func__);
   8624 		return;
   8625 	}
   8626 
   8627 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8628 
   8629 	/* Page 800 works differently than the rest so it has its own func */
   8630 	if (page == BM_WUC_PAGE) {
   8631 		uint16_t tmp;
   8632 
   8633 		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return;
   8636 	}
   8637 
   8638 	/*
    8639 	 * Pages lower than 768 work differently than the rest, so they
    8640 	 * would need their own function; not handled here
   8641 	 */
   8642 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return;
   8645 	}
   8646 
   8647 	/*
   8648 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8649 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8650 	 */
   8651 
   8652 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8653 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8654 		    page << BME1000_PAGE_SHIFT);
   8655 	}
   8656 
   8657 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8658 	wm_put_swfwhw_semaphore(sc);
   8659 }
   8660 
   8661 /*
   8662  * wm_gmii_82580_readreg:	[mii interface function]
   8663  *
   8664  *	Read a PHY register on the 82580 and I350.
    8665  * This could be handled by the PHY layer if we didn't have to lock
    8666  * the resource ...
   8667  */
   8668 static int
   8669 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8670 {
   8671 	struct wm_softc *sc = device_private(self);
   8672 	int sem;
   8673 	int rv;
   8674 
   8675 	sem = swfwphysem[sc->sc_funcid];
   8676 	if (wm_get_swfw_semaphore(sc, sem)) {
   8677 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8678 		    __func__);
   8679 		return 0;
   8680 	}
   8681 
   8682 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8683 
   8684 	wm_put_swfw_semaphore(sc, sem);
   8685 	return rv;
   8686 }
   8687 
   8688 /*
   8689  * wm_gmii_82580_writereg:	[mii interface function]
   8690  *
   8691  *	Write a PHY register on the 82580 and I350.
    8692  * This could be handled by the PHY layer if we didn't have to lock
    8693  * the resource ...
   8694  */
   8695 static void
   8696 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8697 {
   8698 	struct wm_softc *sc = device_private(self);
   8699 	int sem;
   8700 
   8701 	sem = swfwphysem[sc->sc_funcid];
   8702 	if (wm_get_swfw_semaphore(sc, sem)) {
   8703 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8704 		    __func__);
   8705 		return;
   8706 	}
   8707 
   8708 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8709 
   8710 	wm_put_swfw_semaphore(sc, sem);
   8711 }
   8712 
   8713 /*
   8714  * wm_gmii_gs40g_readreg:	[mii interface function]
   8715  *
    8716  *	Read a PHY register on the I210 and I211.
    8717  * This could be handled by the PHY layer if we didn't have to lock
    8718  * the resource ...
   8719  */
   8720 static int
   8721 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8722 {
   8723 	struct wm_softc *sc = device_private(self);
   8724 	int sem;
   8725 	int page, offset;
   8726 	int rv;
   8727 
   8728 	/* Acquire semaphore */
   8729 	sem = swfwphysem[sc->sc_funcid];
   8730 	if (wm_get_swfw_semaphore(sc, sem)) {
   8731 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8732 		    __func__);
   8733 		return 0;
   8734 	}
   8735 
   8736 	/* Page select */
   8737 	page = reg >> GS40G_PAGE_SHIFT;
   8738 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8739 
   8740 	/* Read reg */
   8741 	offset = reg & GS40G_OFFSET_MASK;
   8742 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8743 
   8744 	wm_put_swfw_semaphore(sc, sem);
   8745 	return rv;
   8746 }
   8747 
   8748 /*
   8749  * wm_gmii_gs40g_writereg:	[mii interface function]
   8750  *
   8751  *	Write a PHY register on the I210 and I211.
    8752  * This could be handled by the PHY layer if we didn't have to lock
    8753  * the resource ...
   8754  */
   8755 static void
   8756 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8757 {
   8758 	struct wm_softc *sc = device_private(self);
   8759 	int sem;
   8760 	int page, offset;
   8761 
   8762 	/* Acquire semaphore */
   8763 	sem = swfwphysem[sc->sc_funcid];
   8764 	if (wm_get_swfw_semaphore(sc, sem)) {
   8765 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8766 		    __func__);
   8767 		return;
   8768 	}
   8769 
   8770 	/* Page select */
   8771 	page = reg >> GS40G_PAGE_SHIFT;
   8772 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8773 
   8774 	/* Write reg */
   8775 	offset = reg & GS40G_OFFSET_MASK;
   8776 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8777 
   8778 	/* Release semaphore */
   8779 	wm_put_swfw_semaphore(sc, sem);
   8780 }
   8781 
   8782 /*
   8783  * wm_gmii_statchg:	[mii interface function]
   8784  *
   8785  *	Callback from MII layer when media changes.
   8786  */
   8787 static void
   8788 wm_gmii_statchg(struct ifnet *ifp)
   8789 {
   8790 	struct wm_softc *sc = ifp->if_softc;
   8791 	struct mii_data *mii = &sc->sc_mii;
   8792 
   8793 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8794 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8795 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8796 
   8797 	/*
   8798 	 * Get flow control negotiation result.
   8799 	 */
   8800 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8801 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8802 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8803 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8804 	}
   8805 
   8806 	if (sc->sc_flowflags & IFM_FLOW) {
   8807 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8808 			sc->sc_ctrl |= CTRL_TFCE;
   8809 			sc->sc_fcrtl |= FCRTL_XONE;
   8810 		}
   8811 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8812 			sc->sc_ctrl |= CTRL_RFCE;
   8813 	}
   8814 
   8815 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8816 		DPRINTF(WM_DEBUG_LINK,
   8817 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8818 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8819 	} else {
   8820 		DPRINTF(WM_DEBUG_LINK,
   8821 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8822 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8823 	}
   8824 
   8825 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8826 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8827 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8828 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8829 	if (sc->sc_type == WM_T_80003) {
   8830 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8831 		case IFM_1000_T:
   8832 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8833 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8834 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8835 			break;
   8836 		default:
   8837 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8838 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8839 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8840 			break;
   8841 		}
   8842 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8843 	}
   8844 }
   8845 
   8846 /*
   8847  * wm_kmrn_readreg:
   8848  *
   8849  *	Read a kumeran register
   8850  */
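/*
 * A kumeran access is a two-step sequence on the KUMCTRLSTA
 * register: write the register offset with the read-enable bit
 * (KUMCTRLSTA_REN) set, wait 2us, then read the data back from the
 * same register, as below.
 */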
   8851 static int
   8852 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8853 {
   8854 	int rv;
   8855 
   8856 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8857 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8858 			aprint_error_dev(sc->sc_dev,
   8859 			    "%s: failed to get semaphore\n", __func__);
   8860 			return 0;
   8861 		}
   8862 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8863 		if (wm_get_swfwhw_semaphore(sc)) {
   8864 			aprint_error_dev(sc->sc_dev,
   8865 			    "%s: failed to get semaphore\n", __func__);
   8866 			return 0;
   8867 		}
   8868 	}
   8869 
   8870 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8871 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8872 	    KUMCTRLSTA_REN);
   8873 	CSR_WRITE_FLUSH(sc);
   8874 	delay(2);
   8875 
   8876 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   8877 
   8878 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8879 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8880 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8881 		wm_put_swfwhw_semaphore(sc);
   8882 
   8883 	return rv;
   8884 }
   8885 
   8886 /*
   8887  * wm_kmrn_writereg:
   8888  *
   8889  *	Write a kumeran register
   8890  */
   8891 static void
   8892 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   8893 {
   8894 
   8895 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8896 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8897 			aprint_error_dev(sc->sc_dev,
   8898 			    "%s: failed to get semaphore\n", __func__);
   8899 			return;
   8900 		}
   8901 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8902 		if (wm_get_swfwhw_semaphore(sc)) {
   8903 			aprint_error_dev(sc->sc_dev,
   8904 			    "%s: failed to get semaphore\n", __func__);
   8905 			return;
   8906 		}
   8907 	}
   8908 
   8909 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8910 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8911 	    (val & KUMCTRLSTA_MASK));
   8912 
   8913 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8914 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8915 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8916 		wm_put_swfwhw_semaphore(sc);
   8917 }
   8918 
   8919 /* SGMII related */
   8920 
   8921 /*
   8922  * wm_sgmii_uses_mdio
   8923  *
   8924  * Check whether the transaction is to the internal PHY or the external
   8925  * MDIO interface. Return true if it's MDIO.
   8926  */
   8927 static bool
   8928 wm_sgmii_uses_mdio(struct wm_softc *sc)
   8929 {
   8930 	uint32_t reg;
   8931 	bool ismdio = false;
   8932 
   8933 	switch (sc->sc_type) {
   8934 	case WM_T_82575:
   8935 	case WM_T_82576:
   8936 		reg = CSR_READ(sc, WMREG_MDIC);
   8937 		ismdio = ((reg & MDIC_DEST) != 0);
   8938 		break;
   8939 	case WM_T_82580:
   8940 	case WM_T_I350:
   8941 	case WM_T_I354:
   8942 	case WM_T_I210:
   8943 	case WM_T_I211:
   8944 		reg = CSR_READ(sc, WMREG_MDICNFG);
   8945 		ismdio = ((reg & MDICNFG_DEST) != 0);
   8946 		break;
   8947 	default:
   8948 		break;
   8949 	}
   8950 
   8951 	return ismdio;
   8952 }
   8953 
   8954 /*
   8955  * wm_sgmii_readreg:	[mii interface function]
   8956  *
    8957  *	Read a PHY register on the SGMII interface.
    8958  * This could be handled by the PHY layer if we didn't have to lock
    8959  * the resource ...
   8960  */
   8961 static int
   8962 wm_sgmii_readreg(device_t self, int phy, int reg)
   8963 {
   8964 	struct wm_softc *sc = device_private(self);
   8965 	uint32_t i2ccmd;
   8966 	int i, rv;
   8967 
   8968 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8969 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8970 		    __func__);
   8971 		return 0;
   8972 	}
   8973 
   8974 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8975 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8976 	    | I2CCMD_OPCODE_READ;
   8977 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8978 
   8979 	/* Poll the ready bit */
   8980 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8981 		delay(50);
   8982 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8983 		if (i2ccmd & I2CCMD_READY)
   8984 			break;
   8985 	}
   8986 	if ((i2ccmd & I2CCMD_READY) == 0)
   8987 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   8988 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   8989 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   8990 
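	/*
	 * The data bytes arrive byte-swapped over the I2C interface,
	 * so swap them back before returning; the write path performs
	 * the mirror-image swap on val.
	 */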
   8991 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   8992 
   8993 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   8994 	return rv;
   8995 }
   8996 
   8997 /*
   8998  * wm_sgmii_writereg:	[mii interface function]
   8999  *
    9000  *	Write a PHY register on the SGMII interface.
    9001  * This could be handled by the PHY layer if we didn't have to lock
    9002  * the resource ...
   9003  */
   9004 static void
   9005 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9006 {
   9007 	struct wm_softc *sc = device_private(self);
   9008 	uint32_t i2ccmd;
   9009 	int i;
   9010 	int val_swapped;
   9011 
   9012 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9013 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9014 		    __func__);
   9015 		return;
   9016 	}
   9017 	/* Swap the data bytes for the I2C interface */
   9018 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9019 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9020 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9021 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9022 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9023 
   9024 	/* Poll the ready bit */
   9025 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9026 		delay(50);
   9027 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9028 		if (i2ccmd & I2CCMD_READY)
   9029 			break;
   9030 	}
   9031 	if ((i2ccmd & I2CCMD_READY) == 0)
   9032 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9033 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9034 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9035 
    9036 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9037 }
   9038 
   9039 /* TBI related */
   9040 
   9041 /*
   9042  * wm_tbi_mediainit:
   9043  *
   9044  *	Initialize media for use on 1000BASE-X devices.
   9045  */
   9046 static void
   9047 wm_tbi_mediainit(struct wm_softc *sc)
   9048 {
   9049 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9050 	const char *sep = "";
   9051 
   9052 	if (sc->sc_type < WM_T_82543)
   9053 		sc->sc_tipg = TIPG_WM_DFLT;
   9054 	else
   9055 		sc->sc_tipg = TIPG_LG_DFLT;
   9056 
   9057 	sc->sc_tbi_serdes_anegticks = 5;
   9058 
   9059 	/* Initialize our media structures */
   9060 	sc->sc_mii.mii_ifp = ifp;
   9061 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9062 
   9063 	if ((sc->sc_type >= WM_T_82575)
   9064 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9065 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9066 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9067 	else
   9068 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9069 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9070 
   9071 	/*
   9072 	 * SWD Pins:
   9073 	 *
   9074 	 *	0 = Link LED (output)
   9075 	 *	1 = Loss Of Signal (input)
   9076 	 */
   9077 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9078 
   9079 	/* XXX Perhaps this is only for TBI */
   9080 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9081 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9082 
   9083 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9084 		sc->sc_ctrl &= ~CTRL_LRST;
   9085 
   9086 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9087 
   9088 #define	ADD(ss, mm, dd)							\
   9089 do {									\
   9090 	aprint_normal("%s%s", sep, ss);					\
   9091 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9092 	sep = ", ";							\
   9093 } while (/*CONSTCOND*/0)
   9094 
   9095 	aprint_normal_dev(sc->sc_dev, "");
   9096 
   9097 	/* Only 82545 is LX */
   9098 	if (sc->sc_type == WM_T_82545) {
   9099 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9100 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9101 	} else {
   9102 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9103 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9104 	}
   9105 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9106 	aprint_normal("\n");
   9107 
   9108 #undef ADD
   9109 
   9110 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9111 }
   9112 
   9113 /*
   9114  * wm_tbi_mediachange:	[ifmedia interface function]
   9115  *
   9116  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9117  */
   9118 static int
   9119 wm_tbi_mediachange(struct ifnet *ifp)
   9120 {
   9121 	struct wm_softc *sc = ifp->if_softc;
   9122 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9123 	uint32_t status;
   9124 	int i;
   9125 
   9126 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9127 		/* XXX need some work for >= 82571 and < 82575 */
   9128 		if (sc->sc_type < WM_T_82575)
   9129 			return 0;
   9130 	}
   9131 
   9132 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9133 	    || (sc->sc_type >= WM_T_82575))
   9134 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9135 
   9136 	sc->sc_ctrl &= ~CTRL_LRST;
   9137 	sc->sc_txcw = TXCW_ANE;
   9138 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9139 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9140 	else if (ife->ifm_media & IFM_FDX)
   9141 		sc->sc_txcw |= TXCW_FD;
   9142 	else
   9143 		sc->sc_txcw |= TXCW_HD;
   9144 
   9145 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9146 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9147 
   9148 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9149 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9150 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9151 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9152 	CSR_WRITE_FLUSH(sc);
   9153 	delay(1000);
   9154 
   9155 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9156 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9157 
   9158 	/*
   9159 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   9160 	 * optics detect a signal, 0 if they don't.
   9161 	 */
   9162 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9163 		/* Have signal; wait for the link to come up. */
   9164 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9165 			delay(10000);
   9166 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9167 				break;
   9168 		}
   9169 
   9170 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9171 			    device_xname(sc->sc_dev),i));
   9172 
   9173 		status = CSR_READ(sc, WMREG_STATUS);
   9174 		DPRINTF(WM_DEBUG_LINK,
   9175 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9176 			device_xname(sc->sc_dev),status, STATUS_LU));
   9177 		if (status & STATUS_LU) {
   9178 			/* Link is up. */
   9179 			DPRINTF(WM_DEBUG_LINK,
   9180 			    ("%s: LINK: set media -> link up %s\n",
   9181 			    device_xname(sc->sc_dev),
   9182 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9183 
   9184 			/*
   9185 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9186 			 * so we should update sc->sc_ctrl
   9187 			 */
   9188 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9189 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9190 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9191 			if (status & STATUS_FD)
   9192 				sc->sc_tctl |=
   9193 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9194 			else
   9195 				sc->sc_tctl |=
   9196 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9197 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9198 				sc->sc_fcrtl |= FCRTL_XONE;
   9199 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9200 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9201 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9202 				      sc->sc_fcrtl);
   9203 			sc->sc_tbi_linkup = 1;
   9204 		} else {
   9205 			if (i == WM_LINKUP_TIMEOUT)
   9206 				wm_check_for_link(sc);
   9207 			/* Link is down. */
   9208 			DPRINTF(WM_DEBUG_LINK,
   9209 			    ("%s: LINK: set media -> link down\n",
   9210 			    device_xname(sc->sc_dev)));
   9211 			sc->sc_tbi_linkup = 0;
   9212 		}
   9213 	} else {
   9214 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9215 		    device_xname(sc->sc_dev)));
   9216 		sc->sc_tbi_linkup = 0;
   9217 	}
   9218 
   9219 	wm_tbi_serdes_set_linkled(sc);
   9220 
   9221 	return 0;
   9222 }
   9223 
   9224 /*
   9225  * wm_tbi_mediastatus:	[ifmedia interface function]
   9226  *
   9227  *	Get the current interface media status on a 1000BASE-X device.
   9228  */
   9229 static void
   9230 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9231 {
   9232 	struct wm_softc *sc = ifp->if_softc;
   9233 	uint32_t ctrl, status;
   9234 
   9235 	ifmr->ifm_status = IFM_AVALID;
   9236 	ifmr->ifm_active = IFM_ETHER;
   9237 
   9238 	status = CSR_READ(sc, WMREG_STATUS);
   9239 	if ((status & STATUS_LU) == 0) {
   9240 		ifmr->ifm_active |= IFM_NONE;
   9241 		return;
   9242 	}
   9243 
   9244 	ifmr->ifm_status |= IFM_ACTIVE;
   9245 	/* Only 82545 is LX */
   9246 	if (sc->sc_type == WM_T_82545)
   9247 		ifmr->ifm_active |= IFM_1000_LX;
   9248 	else
   9249 		ifmr->ifm_active |= IFM_1000_SX;
   9250 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9251 		ifmr->ifm_active |= IFM_FDX;
   9252 	else
   9253 		ifmr->ifm_active |= IFM_HDX;
   9254 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9255 	if (ctrl & CTRL_RFCE)
   9256 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9257 	if (ctrl & CTRL_TFCE)
   9258 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9259 }
   9260 
   9261 /* XXX TBI only */
   9262 static int
   9263 wm_check_for_link(struct wm_softc *sc)
   9264 {
   9265 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9266 	uint32_t rxcw;
   9267 	uint32_t ctrl;
   9268 	uint32_t status;
   9269 	uint32_t sig;
   9270 
   9271 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9272 		/* XXX need some work for >= 82571 */
   9273 		if (sc->sc_type >= WM_T_82571) {
   9274 			sc->sc_tbi_linkup = 1;
   9275 			return 0;
   9276 		}
   9277 	}
   9278 
   9279 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9280 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9281 	status = CSR_READ(sc, WMREG_STATUS);
   9282 
   9283 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9284 
   9285 	DPRINTF(WM_DEBUG_LINK,
   9286 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9287 		device_xname(sc->sc_dev), __func__,
   9288 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9289 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9290 
   9291 	/*
   9292 	 * SWDPIN   LU RXCW
   9293 	 *      0    0    0
   9294 	 *      0    0    1	(should not happen)
   9295 	 *      0    1    0	(should not happen)
   9296 	 *      0    1    1	(should not happen)
   9297 	 *      1    0    0	Disable autonego and force linkup
   9298 	 *      1    0    1	got /C/ but not linkup yet
   9299 	 *      1    1    0	(linkup)
   9300 	 *      1    1    1	If IFM_AUTO, back to autonego
   9301 	 *
   9302 	 */
   9303 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9304 	    && ((status & STATUS_LU) == 0)
   9305 	    && ((rxcw & RXCW_C) == 0)) {
   9306 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9307 			__func__));
   9308 		sc->sc_tbi_linkup = 0;
   9309 		/* Disable auto-negotiation in the TXCW register */
   9310 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9311 
   9312 		/*
   9313 		 * Force link-up and also force full-duplex.
   9314 		 *
    9315 		 * NOTE: CTRL will update TFCE and RFCE automatically,
   9316 		 * so we should update sc->sc_ctrl
   9317 		 */
   9318 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9319 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9320 	} else if (((status & STATUS_LU) != 0)
   9321 	    && ((rxcw & RXCW_C) != 0)
   9322 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9323 		sc->sc_tbi_linkup = 1;
   9324 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9325 			__func__));
   9326 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9327 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9328 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9329 	    && ((rxcw & RXCW_C) != 0)) {
   9330 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9331 	} else {
   9332 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9333 			status));
   9334 	}
   9335 
   9336 	return 0;
   9337 }
   9338 
   9339 /*
   9340  * wm_tbi_tick:
   9341  *
   9342  *	Check the link on TBI devices.
   9343  *	This function acts as mii_tick().
   9344  */
   9345 static void
   9346 wm_tbi_tick(struct wm_softc *sc)
   9347 {
   9348 	struct mii_data *mii = &sc->sc_mii;
   9349 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9350 	uint32_t status;
   9351 
   9352 	KASSERT(WM_CORE_LOCKED(sc));
   9353 
   9354 	status = CSR_READ(sc, WMREG_STATUS);
   9355 
   9356 	/* XXX is this needed? */
   9357 	(void)CSR_READ(sc, WMREG_RXCW);
   9358 	(void)CSR_READ(sc, WMREG_CTRL);
   9359 
   9360 	/* set link status */
   9361 	if ((status & STATUS_LU) == 0) {
   9362 		DPRINTF(WM_DEBUG_LINK,
   9363 		    ("%s: LINK: checklink -> down\n",
   9364 			device_xname(sc->sc_dev)));
   9365 		sc->sc_tbi_linkup = 0;
   9366 	} else if (sc->sc_tbi_linkup == 0) {
   9367 		DPRINTF(WM_DEBUG_LINK,
   9368 		    ("%s: LINK: checklink -> up %s\n",
   9369 			device_xname(sc->sc_dev),
   9370 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9371 		sc->sc_tbi_linkup = 1;
   9372 		sc->sc_tbi_serdes_ticks = 0;
   9373 	}
   9374 
   9375 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9376 		goto setled;
   9377 
   9378 	if ((status & STATUS_LU) == 0) {
   9379 		sc->sc_tbi_linkup = 0;
   9380 		/* If the timer expired, retry autonegotiation */
   9381 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9382 		    && (++sc->sc_tbi_serdes_ticks
   9383 			>= sc->sc_tbi_serdes_anegticks)) {
   9384 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9385 			sc->sc_tbi_serdes_ticks = 0;
   9386 			/*
   9387 			 * Reset the link, and let autonegotiation do
   9388 			 * its thing
   9389 			 */
   9390 			sc->sc_ctrl |= CTRL_LRST;
   9391 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9392 			CSR_WRITE_FLUSH(sc);
   9393 			delay(1000);
   9394 			sc->sc_ctrl &= ~CTRL_LRST;
   9395 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9396 			CSR_WRITE_FLUSH(sc);
   9397 			delay(1000);
   9398 			CSR_WRITE(sc, WMREG_TXCW,
   9399 			    sc->sc_txcw & ~TXCW_ANE);
   9400 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9401 		}
   9402 	}
   9403 
   9404 setled:
   9405 	wm_tbi_serdes_set_linkled(sc);
   9406 }
   9407 
   9408 /* SERDES related */
   9409 static void
   9410 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9411 {
   9412 	uint32_t reg;
   9413 
   9414 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9415 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9416 		return;
   9417 
   9418 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9419 	reg |= PCS_CFG_PCS_EN;
   9420 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9421 
   9422 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9423 	reg &= ~CTRL_EXT_SWDPIN(3);
   9424 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9425 	CSR_WRITE_FLUSH(sc);
   9426 }
   9427 
   9428 static int
   9429 wm_serdes_mediachange(struct ifnet *ifp)
   9430 {
   9431 	struct wm_softc *sc = ifp->if_softc;
   9432 	bool pcs_autoneg = true; /* XXX */
   9433 	uint32_t ctrl_ext, pcs_lctl, reg;
   9434 
   9435 	/* XXX Currently, this function is not called on 8257[12] */
   9436 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9437 	    || (sc->sc_type >= WM_T_82575))
   9438 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9439 
   9440 	wm_serdes_power_up_link_82575(sc);
   9441 
   9442 	sc->sc_ctrl |= CTRL_SLU;
   9443 
   9444 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9445 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9446 
   9447 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9448 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9449 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9450 	case CTRL_EXT_LINK_MODE_SGMII:
   9451 		pcs_autoneg = true;
   9452 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9453 		break;
   9454 	case CTRL_EXT_LINK_MODE_1000KX:
   9455 		pcs_autoneg = false;
   9456 		/* FALLTHROUGH */
   9457 	default:
   9458 		if ((sc->sc_type == WM_T_82575)
   9459 		    || (sc->sc_type == WM_T_82576)) {
   9460 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9461 				pcs_autoneg = false;
   9462 		}
   9463 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9464 		    | CTRL_FRCFDX;
   9465 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9466 	}
   9467 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9468 
   9469 	if (pcs_autoneg) {
   9470 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9471 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9472 
   9473 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9474 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9475 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9476 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9477 	} else
   9478 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9479 
   9480 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   9481 
   9482 
   9483 	return 0;
   9484 }
   9485 
   9486 static void
   9487 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9488 {
   9489 	struct wm_softc *sc = ifp->if_softc;
   9490 	struct mii_data *mii = &sc->sc_mii;
   9491 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9492 	uint32_t pcs_adv, pcs_lpab, reg;
   9493 
   9494 	ifmr->ifm_status = IFM_AVALID;
   9495 	ifmr->ifm_active = IFM_ETHER;
   9496 
   9497 	/* Check PCS */
   9498 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9499 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9500 		ifmr->ifm_active |= IFM_NONE;
   9501 		sc->sc_tbi_linkup = 0;
   9502 		goto setled;
   9503 	}
   9504 
   9505 	sc->sc_tbi_linkup = 1;
   9506 	ifmr->ifm_status |= IFM_ACTIVE;
   9507 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9508 	if ((reg & PCS_LSTS_FDX) != 0)
   9509 		ifmr->ifm_active |= IFM_FDX;
   9510 	else
   9511 		ifmr->ifm_active |= IFM_HDX;
   9512 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9513 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9514 		/* Check flow */
   9515 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9516 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9517 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9518 			goto setled;
   9519 		}
   9520 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9521 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9522 		DPRINTF(WM_DEBUG_LINK,
   9523 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9524 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9525 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9526 			mii->mii_media_active |= IFM_FLOW
   9527 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9528 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9529 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9530 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9531 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9532 			mii->mii_media_active |= IFM_FLOW
   9533 			    | IFM_ETH_TXPAUSE;
   9534 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9535 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9536 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9537 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9538 			mii->mii_media_active |= IFM_FLOW
   9539 			    | IFM_ETH_RXPAUSE;
   9540 		} else {
   9541 		}
   9542 	}
   9543 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9544 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9545 setled:
   9546 	wm_tbi_serdes_set_linkled(sc);
   9547 }
   9548 
   9549 /*
   9550  * wm_serdes_tick:
   9551  *
   9552  *	Check the link on serdes devices.
   9553  */
   9554 static void
   9555 wm_serdes_tick(struct wm_softc *sc)
   9556 {
   9557 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9558 	struct mii_data *mii = &sc->sc_mii;
   9559 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9560 	uint32_t reg;
   9561 
   9562 	KASSERT(WM_CORE_LOCKED(sc));
   9563 
   9564 	mii->mii_media_status = IFM_AVALID;
   9565 	mii->mii_media_active = IFM_ETHER;
   9566 
   9567 	/* Check PCS */
   9568 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9569 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9570 		mii->mii_media_status |= IFM_ACTIVE;
   9571 		sc->sc_tbi_linkup = 1;
   9572 		sc->sc_tbi_serdes_ticks = 0;
   9573 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9574 		if ((reg & PCS_LSTS_FDX) != 0)
   9575 			mii->mii_media_active |= IFM_FDX;
   9576 		else
   9577 			mii->mii_media_active |= IFM_HDX;
   9578 	} else {
    9579 		mii->mii_media_active |= IFM_NONE;
   9580 		sc->sc_tbi_linkup = 0;
    9581 		/* If the timer expired, retry autonegotiation */
   9582 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9583 		    && (++sc->sc_tbi_serdes_ticks
   9584 			>= sc->sc_tbi_serdes_anegticks)) {
   9585 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9586 			sc->sc_tbi_serdes_ticks = 0;
   9587 			/* XXX */
   9588 			wm_serdes_mediachange(ifp);
   9589 		}
   9590 	}
   9591 
   9592 	wm_tbi_serdes_set_linkled(sc);
   9593 }
   9594 
   9595 /* SFP related */
   9596 
   9597 static int
   9598 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9599 {
   9600 	uint32_t i2ccmd;
   9601 	int i;
   9602 
   9603 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9604 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9605 
   9606 	/* Poll the ready bit */
   9607 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9608 		delay(50);
   9609 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9610 		if (i2ccmd & I2CCMD_READY)
   9611 			break;
   9612 	}
   9613 	if ((i2ccmd & I2CCMD_READY) == 0)
   9614 		return -1;
   9615 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9616 		return -1;
   9617 
   9618 	*data = i2ccmd & 0x00ff;
   9619 
   9620 	return 0;
   9621 }
   9622 
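/*
 * wm_sfp_get_media_type:
 *
 *	Identify an attached SFP module over the I2C interface.  The
 *	code below reads the module identifier byte and the Ethernet
 *	compliance flags, mapping 1000BASE-SX/LX modules to SERDES,
 *	1000BASE-T to copper over SGMII and 100BASE-FX to SERDES over
 *	SGMII.
 */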
   9623 static uint32_t
   9624 wm_sfp_get_media_type(struct wm_softc *sc)
   9625 {
   9626 	uint32_t ctrl_ext;
   9627 	uint8_t val = 0;
   9628 	int timeout = 3;
   9629 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9630 	int rv = -1;
   9631 
   9632 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9633 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9634 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9635 	CSR_WRITE_FLUSH(sc);
   9636 
   9637 	/* Read SFP module data */
   9638 	while (timeout) {
   9639 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9640 		if (rv == 0)
   9641 			break;
   9642 		delay(100*1000); /* XXX too big */
   9643 		timeout--;
   9644 	}
   9645 	if (rv != 0)
   9646 		goto out;
   9647 	switch (val) {
   9648 	case SFF_SFP_ID_SFF:
   9649 		aprint_normal_dev(sc->sc_dev,
   9650 		    "Module/Connector soldered to board\n");
   9651 		break;
   9652 	case SFF_SFP_ID_SFP:
   9653 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9654 		break;
   9655 	case SFF_SFP_ID_UNKNOWN:
   9656 		goto out;
   9657 	default:
   9658 		break;
   9659 	}
   9660 
   9661 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9662 	if (rv != 0) {
   9663 		goto out;
   9664 	}
   9665 
   9666 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9667 		mediatype = WM_MEDIATYPE_SERDES;
   9668 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   9669 		sc->sc_flags |= WM_F_SGMII;
   9670 		mediatype = WM_MEDIATYPE_COPPER;
   9671 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   9672 		sc->sc_flags |= WM_F_SGMII;
   9673 		mediatype = WM_MEDIATYPE_SERDES;
   9674 	}
   9675 
   9676 out:
   9677 	/* Restore I2C interface setting */
   9678 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9679 
   9680 	return mediatype;
   9681 }
   9682 /*
   9683  * NVM related.
   9684  * Microwire, SPI (w/wo EERD) and Flash.
   9685  */
   9686 
   9687 /* Both spi and uwire */
   9688 
   9689 /*
   9690  * wm_eeprom_sendbits:
   9691  *
   9692  *	Send a series of bits to the EEPROM.
   9693  */
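/*
 * Bits are clocked out MSB first: the data bit is set up on EECD_DI
 * and EECD_SK is then pulsed high and low, with a 2us delay on each
 * edge, as in the loop below.
 */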
   9694 static void
   9695 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9696 {
   9697 	uint32_t reg;
   9698 	int x;
   9699 
   9700 	reg = CSR_READ(sc, WMREG_EECD);
   9701 
   9702 	for (x = nbits; x > 0; x--) {
   9703 		if (bits & (1U << (x - 1)))
   9704 			reg |= EECD_DI;
   9705 		else
   9706 			reg &= ~EECD_DI;
   9707 		CSR_WRITE(sc, WMREG_EECD, reg);
   9708 		CSR_WRITE_FLUSH(sc);
   9709 		delay(2);
   9710 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9711 		CSR_WRITE_FLUSH(sc);
   9712 		delay(2);
   9713 		CSR_WRITE(sc, WMREG_EECD, reg);
   9714 		CSR_WRITE_FLUSH(sc);
   9715 		delay(2);
   9716 	}
   9717 }
   9718 
   9719 /*
   9720  * wm_eeprom_recvbits:
   9721  *
   9722  *	Receive a series of bits from the EEPROM.
   9723  */
   9724 static void
   9725 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9726 {
   9727 	uint32_t reg, val;
   9728 	int x;
   9729 
   9730 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9731 
   9732 	val = 0;
   9733 	for (x = nbits; x > 0; x--) {
   9734 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9735 		CSR_WRITE_FLUSH(sc);
   9736 		delay(2);
   9737 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9738 			val |= (1U << (x - 1));
   9739 		CSR_WRITE(sc, WMREG_EECD, reg);
   9740 		CSR_WRITE_FLUSH(sc);
   9741 		delay(2);
   9742 	}
   9743 	*valp = val;
   9744 }
   9745 
   9746 /* Microwire */
   9747 
   9748 /*
   9749  * wm_nvm_read_uwire:
   9750  *
   9751  *	Read a word from the EEPROM using the MicroWire protocol.
   9752  */
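/*
 * Each word read below is a complete MicroWire transaction: raise
 * chip select, shift out the 3-bit READ opcode and the word address
 * (sc_nvm_addrbits bits wide), clock in 16 data bits, then drop chip
 * select again before the next word.
 */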
   9753 static int
   9754 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9755 {
   9756 	uint32_t reg, val;
   9757 	int i;
   9758 
   9759 	for (i = 0; i < wordcnt; i++) {
   9760 		/* Clear SK and DI. */
   9761 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9762 		CSR_WRITE(sc, WMREG_EECD, reg);
   9763 
   9764 		/*
    9765 		 * XXX: workaround for a bug in qemu-0.12.x and
    9766 		 * prior, and in Xen.
    9767 		 *
    9768 		 * We use this workaround only for the 82540 because
    9769 		 * qemu's e1000 acts as an 82540.
   9770 		 */
   9771 		if (sc->sc_type == WM_T_82540) {
   9772 			reg |= EECD_SK;
   9773 			CSR_WRITE(sc, WMREG_EECD, reg);
   9774 			reg &= ~EECD_SK;
   9775 			CSR_WRITE(sc, WMREG_EECD, reg);
   9776 			CSR_WRITE_FLUSH(sc);
   9777 			delay(2);
   9778 		}
   9779 		/* XXX: end of workaround */
   9780 
   9781 		/* Set CHIP SELECT. */
   9782 		reg |= EECD_CS;
   9783 		CSR_WRITE(sc, WMREG_EECD, reg);
   9784 		CSR_WRITE_FLUSH(sc);
   9785 		delay(2);
   9786 
   9787 		/* Shift in the READ command. */
   9788 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9789 
   9790 		/* Shift in address. */
   9791 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9792 
   9793 		/* Shift out the data. */
   9794 		wm_eeprom_recvbits(sc, &val, 16);
   9795 		data[i] = val & 0xffff;
   9796 
   9797 		/* Clear CHIP SELECT. */
   9798 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9799 		CSR_WRITE(sc, WMREG_EECD, reg);
   9800 		CSR_WRITE_FLUSH(sc);
   9801 		delay(2);
   9802 	}
   9803 
   9804 	return 0;
   9805 }
   9806 
   9807 /* SPI */
   9808 
   9809 /*
   9810  * Set SPI and FLASH related information from the EECD register.
   9811  * For 82541 and 82547, the word size is taken from EEPROM.
   9812  */
   9813 static int
   9814 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9815 {
   9816 	int size;
   9817 	uint32_t reg;
   9818 	uint16_t data;
   9819 
   9820 	reg = CSR_READ(sc, WMREG_EECD);
   9821 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9822 
   9823 	/* Read the size of NVM from EECD by default */
   9824 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9825 	switch (sc->sc_type) {
   9826 	case WM_T_82541:
   9827 	case WM_T_82541_2:
   9828 	case WM_T_82547:
   9829 	case WM_T_82547_2:
   9830 		/* Set dummy value to access EEPROM */
   9831 		sc->sc_nvm_wordsize = 64;
   9832 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9833 		reg = data;
   9834 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9835 		if (size == 0)
   9836 			size = 6; /* 64 word size */
   9837 		else
   9838 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9839 		break;
   9840 	case WM_T_80003:
   9841 	case WM_T_82571:
   9842 	case WM_T_82572:
   9843 	case WM_T_82573: /* SPI case */
   9844 	case WM_T_82574: /* SPI case */
   9845 	case WM_T_82583: /* SPI case */
   9846 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9847 		if (size > 14)
   9848 			size = 14;
   9849 		break;
   9850 	case WM_T_82575:
   9851 	case WM_T_82576:
   9852 	case WM_T_82580:
   9853 	case WM_T_I350:
   9854 	case WM_T_I354:
   9855 	case WM_T_I210:
   9856 	case WM_T_I211:
   9857 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9858 		if (size > 15)
   9859 			size = 15;
   9860 		break;
   9861 	default:
   9862 		aprint_error_dev(sc->sc_dev,
		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
		return -1;
   9866 	}
   9867 
   9868 	sc->sc_nvm_wordsize = 1 << size;
   9869 
   9870 	return 0;
   9871 }
   9872 
   9873 /*
   9874  * wm_nvm_ready_spi:
   9875  *
   9876  *	Wait for a SPI EEPROM to be ready for commands.
   9877  */
   9878 static int
   9879 wm_nvm_ready_spi(struct wm_softc *sc)
   9880 {
   9881 	uint32_t val;
   9882 	int usec;
   9883 
   9884 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   9885 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   9886 		wm_eeprom_recvbits(sc, &val, 8);
   9887 		if ((val & SPI_SR_RDY) == 0)
   9888 			break;
   9889 	}
   9890 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   9892 		return 1;
   9893 	}
   9894 	return 0;
   9895 }
   9896 
   9897 /*
   9898  * wm_nvm_read_spi:
   9899  *
 *	Read a word from the EEPROM using the SPI protocol.
   9901  */
   9902 static int
   9903 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9904 {
   9905 	uint32_t reg, val;
   9906 	int i;
   9907 	uint8_t opc;
   9908 
   9909 	/* Clear SK and CS. */
   9910 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   9911 	CSR_WRITE(sc, WMREG_EECD, reg);
   9912 	CSR_WRITE_FLUSH(sc);
   9913 	delay(2);
   9914 
   9915 	if (wm_nvm_ready_spi(sc))
   9916 		return 1;
   9917 
   9918 	/* Toggle CS to flush commands. */
   9919 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   9920 	CSR_WRITE_FLUSH(sc);
   9921 	delay(2);
   9922 	CSR_WRITE(sc, WMREG_EECD, reg);
   9923 	CSR_WRITE_FLUSH(sc);
   9924 	delay(2);
   9925 
   9926 	opc = SPI_OPC_READ;
   9927 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   9928 		opc |= SPI_OPC_A8;
   9929 
   9930 	wm_eeprom_sendbits(sc, opc, 8);
   9931 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   9932 
   9933 	for (i = 0; i < wordcnt; i++) {
   9934 		wm_eeprom_recvbits(sc, &val, 16);
   9935 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   9936 	}
   9937 
   9938 	/* Raise CS and clear SK. */
   9939 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   9940 	CSR_WRITE(sc, WMREG_EECD, reg);
   9941 	CSR_WRITE_FLUSH(sc);
   9942 	delay(2);
   9943 
   9944 	return 0;
   9945 }
   9946 
/* Reading via the EERD register */
   9948 
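/*
 * wm_poll_eerd_eewr_done:
 *
 *	Wait for an EERD/EEWR transaction to complete.  Returns 0 once
 *	the DONE bit is set, or -1 if it is still clear after about half
 *	a second of polling.
 */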
   9949 static int
   9950 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   9951 {
   9952 	uint32_t attempts = 100000;
   9953 	uint32_t i, reg = 0;
   9954 	int32_t done = -1;
   9955 
   9956 	for (i = 0; i < attempts; i++) {
   9957 		reg = CSR_READ(sc, rw);
   9958 
   9959 		if (reg & EERD_DONE) {
   9960 			done = 0;
   9961 			break;
   9962 		}
   9963 		delay(5);
   9964 	}
   9965 
   9966 	return done;
   9967 }
   9968 
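/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the EEPROM through the EERD register, one
 *	START/DONE handshake per word.
 */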
   9969 static int
   9970 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   9971     uint16_t *data)
   9972 {
   9973 	int i, eerd = 0;
   9974 	int error = 0;
   9975 
   9976 	for (i = 0; i < wordcnt; i++) {
   9977 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   9978 
   9979 		CSR_WRITE(sc, WMREG_EERD, eerd);
   9980 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   9981 		if (error != 0)
   9982 			break;
   9983 
   9984 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   9985 	}
   9986 
   9987 	return error;
   9988 }
   9989 
   9990 /* Flash */
   9991 
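/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Find out which of the two flash banks holds the valid NVM image:
 *	from the EECD SEC1VAL bits on ICH8/ICH9 if they are valid, and by
 *	probing each bank's signature byte otherwise.
 */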
   9992 static int
   9993 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   9994 {
   9995 	uint32_t eecd;
   9996 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   9997 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   9998 	uint8_t sig_byte = 0;
   9999 
   10000 	switch (sc->sc_type) {
   10001 	case WM_T_ICH8:
   10002 	case WM_T_ICH9:
   10003 		eecd = CSR_READ(sc, WMREG_EECD);
   10004 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10005 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10006 			return 0;
   10007 		}
   10008 		/* FALLTHROUGH */
   10009 	default:
   10010 		/* Default to 0 */
   10011 		*bank = 0;
   10012 
   10013 		/* Check bank 0 */
   10014 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10015 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10016 			*bank = 0;
   10017 			return 0;
   10018 		}
   10019 
   10020 		/* Check bank 1 */
   10021 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10022 		    &sig_byte);
   10023 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10024 			*bank = 1;
   10025 			return 0;
   10026 		}
   10027 	}
   10028 
   10029 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10030 		device_xname(sc->sc_dev)));
   10031 	return -1;
   10032 }
   10033 
   10034 /******************************************************************************
   10035  * This function does initial flash setup so that a new read/write/erase cycle
   10036  * can be started.
   10037  *
   10038  * sc - The pointer to the hw structure
   10039  ****************************************************************************/
   10040 static int32_t
   10041 wm_ich8_cycle_init(struct wm_softc *sc)
   10042 {
   10043 	uint16_t hsfsts;
   10044 	int32_t error = 1;
   10045 	int32_t i     = 0;
   10046 
   10047 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10048 
	/* Check the Flash Descriptor Valid bit in HW status */
   10050 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10051 		return error;
   10052 	}
   10053 
	/* Clear FCERR and DAEL in HW status by writing 1s */
   10056 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10057 
   10058 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10059 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which could then be used to tell whether a
	 * cycle is in progress or has completed.  We should also have
	 * some software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit so that two threads' accesses to those
	 * bits are serialized and two threads don't start a cycle at the
	 * same time.
	 */
   10070 
   10071 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10072 		/*
   10073 		 * There is no cycle running at present, so we can start a
   10074 		 * cycle
   10075 		 */
   10076 
   10077 		/* Begin by setting Flash Cycle Done. */
   10078 		hsfsts |= HSFSTS_DONE;
   10079 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10080 		error = 0;
   10081 	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
   10086 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10087 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10088 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10089 				error = 0;
   10090 				break;
   10091 			}
   10092 			delay(1);
   10093 		}
   10094 		if (error == 0) {
			/*
			 * The previous cycle completed within the timeout,
			 * so now set the Flash Cycle Done.
			 */
   10099 			hsfsts |= HSFSTS_DONE;
   10100 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10101 		}
   10102 	}
   10103 	return error;
   10104 }
   10105 
   10106 /******************************************************************************
   10107  * This function starts a flash cycle and waits for its completion
   10108  *
   10109  * sc - The pointer to the hw structure
   10110  ****************************************************************************/
   10111 static int32_t
   10112 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10113 {
   10114 	uint16_t hsflctl;
   10115 	uint16_t hsfsts;
   10116 	int32_t error = 1;
   10117 	uint32_t i = 0;
   10118 
   10119 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10120 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10121 	hsflctl |= HSFCTL_GO;
   10122 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10123 
   10124 	/* Wait till FDONE bit is set to 1 */
   10125 	do {
   10126 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10127 		if (hsfsts & HSFSTS_DONE)
   10128 			break;
   10129 		delay(1);
   10130 		i++;
   10131 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10133 		error = 0;
   10134 
   10135 	return error;
   10136 }
   10137 
   10138 /******************************************************************************
   10139  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   10140  *
   10141  * sc - The pointer to the hw structure
   10142  * index - The index of the byte or word to read.
   10143  * size - Size of data to read, 1=byte 2=word
   10144  * data - Pointer to the word to store the value read.
   10145  *****************************************************************************/
   10146 static int32_t
   10147 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10148     uint32_t size, uint16_t *data)
   10149 {
   10150 	uint16_t hsfsts;
   10151 	uint16_t hsflctl;
   10152 	uint32_t flash_linear_address;
   10153 	uint32_t flash_data = 0;
   10154 	int32_t error = 1;
   10155 	int32_t count = 0;
   10156 
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10159 		return error;
   10160 
   10161 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10162 	    sc->sc_ich8_flash_base;
   10163 
   10164 	do {
   10165 		delay(1);
   10166 		/* Steps */
   10167 		error = wm_ich8_cycle_init(sc);
   10168 		if (error)
   10169 			break;
   10170 
   10171 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10172 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10174 		    & HSFCTL_BCOUNT_MASK;
   10175 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10176 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10177 
		/*
		 * Write the last 24 bits of the index into the Flash
		 * Linear Address field of the Flash Address register.
		 */
		/* TODO: check the index against the size of the flash */
   10183 
   10184 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10185 
   10186 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10187 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read in the Flash Data0
		 * register, least significant byte first.
		 */
   10194 		if (error == 0) {
   10195 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10196 			if (size == 1)
   10197 				*data = (uint8_t)(flash_data & 0x000000FF);
   10198 			else if (size == 2)
   10199 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10200 			break;
   10201 		} else {
   10202 			/*
   10203 			 * If we've gotten here, then things are probably
   10204 			 * completely hosed, but if the error condition is
   10205 			 * detected, it won't hurt to give it another try...
   10206 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10207 			 */
   10208 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10209 			if (hsfsts & HSFSTS_ERR) {
   10210 				/* Repeat for some time before giving up. */
   10211 				continue;
   10212 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10213 				break;
   10214 		}
   10215 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10216 
   10217 	return error;
   10218 }
   10219 
   10220 /******************************************************************************
   10221  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10222  *
   10223  * sc - pointer to wm_hw structure
   10224  * index - The index of the byte to read.
   10225  * data - Pointer to a byte to store the value read.
   10226  *****************************************************************************/
   10227 static int32_t
   10228 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10229 {
   10230 	int32_t status;
   10231 	uint16_t word = 0;
   10232 
   10233 	status = wm_read_ich8_data(sc, index, 1, &word);
   10234 	if (status == 0)
   10235 		*data = (uint8_t)word;
   10236 	else
   10237 		*data = 0;
   10238 
   10239 	return status;
   10240 }
   10241 
   10242 /******************************************************************************
   10243  * Reads a word from the NVM using the ICH8 flash access registers.
   10244  *
   10245  * sc - pointer to wm_hw structure
   10246  * index - The starting byte index of the word to read.
   10247  * data - Pointer to a word to store the value read.
   10248  *****************************************************************************/
   10249 static int32_t
   10250 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10251 {
   10252 	int32_t status;
   10253 
   10254 	status = wm_read_ich8_data(sc, index, 2, data);
   10255 	return status;
   10256 }
   10257 
   10258 /******************************************************************************
   10259  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10260  * register.
   10261  *
   10262  * sc - Struct containing variables accessed by shared code
   10263  * offset - offset of word in the EEPROM to read
   10264  * data - word read from the EEPROM
   10265  * words - number of words to read
   10266  *****************************************************************************/
   10267 static int
   10268 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10269 {
   10270 	int32_t  error = 0;
   10271 	uint32_t flash_bank = 0;
   10272 	uint32_t act_offset = 0;
   10273 	uint32_t bank_offset = 0;
   10274 	uint16_t word = 0;
   10275 	uint16_t i = 0;
   10276 
   10277 	/*
   10278 	 * We need to know which is the valid flash bank.  In the event
   10279 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10280 	 * managing flash_bank.  So it cannot be trusted and needs
   10281 	 * to be updated with each read.
   10282 	 */
   10283 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10284 	if (error) {
   10285 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10286 			device_xname(sc->sc_dev)));
   10287 		flash_bank = 0;
   10288 	}
   10289 
	/*
	 * If we're on bank 1, adjust the offset by the bank size,
	 * converted from words to bytes.
	 */
   10294 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10295 
   10296 	error = wm_get_swfwhw_semaphore(sc);
   10297 	if (error) {
   10298 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10299 		    __func__);
   10300 		return error;
   10301 	}
   10302 
   10303 	for (i = 0; i < words; i++) {
   10304 		/* The NVM part needs a byte offset, hence * 2 */
   10305 		act_offset = bank_offset + ((offset + i) * 2);
   10306 		error = wm_read_ich8_word(sc, act_offset, &word);
   10307 		if (error) {
   10308 			aprint_error_dev(sc->sc_dev,
   10309 			    "%s: failed to read NVM\n", __func__);
   10310 			break;
   10311 		}
   10312 		data[i] = word;
   10313 	}
   10314 
   10315 	wm_put_swfwhw_semaphore(sc);
   10316 	return error;
   10317 }
   10318 
   10319 /* iNVM */
   10320 
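/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the iNVM records for a word autoload entry whose address
 *	matches "address", skipping over CSR autoload and RSA key records.
 */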
   10321 static int
   10322 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10323 {
	int32_t  rv = -1;	/* "not found" unless a matching word is seen */
   10325 	uint32_t invm_dword;
   10326 	uint16_t i;
   10327 	uint8_t record_type, word_address;
   10328 
   10329 	for (i = 0; i < INVM_SIZE; i++) {
   10330 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10331 		/* Get record type */
   10332 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10333 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10334 			break;
   10335 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10336 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10337 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10338 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10339 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10340 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10341 			if (word_address == address) {
   10342 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10343 				rv = 0;
   10344 				break;
   10345 			}
   10346 		}
   10347 	}
   10348 
   10349 	return rv;
   10350 }
   10351 
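/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM.  Words which aren't programmed fall back
 *	to the I211 default values (or 0xffff for the MAC address words).
 */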
   10352 static int
   10353 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10354 {
   10355 	int rv = 0;
   10356 	int i;
   10357 
   10358 	for (i = 0; i < words; i++) {
   10359 		switch (offset + i) {
   10360 		case NVM_OFF_MACADDR:
   10361 		case NVM_OFF_MACADDR1:
   10362 		case NVM_OFF_MACADDR2:
   10363 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10364 			if (rv != 0) {
   10365 				data[i] = 0xffff;
   10366 				rv = -1;
   10367 			}
   10368 			break;
   10369 		case NVM_OFF_CFG2:
   10370 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10371 			if (rv != 0) {
   10372 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10373 				rv = 0;
   10374 			}
   10375 			break;
   10376 		case NVM_OFF_CFG4:
   10377 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10378 			if (rv != 0) {
   10379 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10380 				rv = 0;
   10381 			}
   10382 			break;
   10383 		case NVM_OFF_LED_1_CFG:
   10384 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10385 			if (rv != 0) {
   10386 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10387 				rv = 0;
   10388 			}
   10389 			break;
   10390 		case NVM_OFF_LED_0_2_CFG:
   10391 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10392 			if (rv != 0) {
   10393 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10394 				rv = 0;
   10395 			}
   10396 			break;
   10397 		case NVM_OFF_ID_LED_SETTINGS:
   10398 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10399 			if (rv != 0) {
   10400 				*data = ID_LED_RESERVED_FFFF;
   10401 				rv = 0;
   10402 			}
   10403 			break;
   10404 		default:
   10405 			DPRINTF(WM_DEBUG_NVM,
   10406 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10407 			*data = NVM_RESERVED_WORD;
   10408 			break;
   10409 		}
   10410 	}
   10411 
   10412 	return rv;
   10413 }
   10414 
   10415 /* Lock, detecting NVM type, validate checksum, version and read */
   10416 
   10417 /*
   10418  * wm_nvm_acquire:
   10419  *
   10420  *	Perform the EEPROM handshake required on some chips.
   10421  */
   10422 static int
   10423 wm_nvm_acquire(struct wm_softc *sc)
   10424 {
   10425 	uint32_t reg;
   10426 	int x;
   10427 	int ret = 0;
   10428 
	/* Flash-type NVM needs no handshake; always succeeds */
   10430 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10431 		return 0;
   10432 
   10433 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10434 		ret = wm_get_swfwhw_semaphore(sc);
   10435 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10436 		/* This will also do wm_get_swsm_semaphore() if needed */
   10437 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10438 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10439 		ret = wm_get_swsm_semaphore(sc);
   10440 	}
   10441 
   10442 	if (ret) {
   10443 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10444 			__func__);
   10445 		return 1;
   10446 	}
   10447 
   10448 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10449 		reg = CSR_READ(sc, WMREG_EECD);
   10450 
   10451 		/* Request EEPROM access. */
   10452 		reg |= EECD_EE_REQ;
   10453 		CSR_WRITE(sc, WMREG_EECD, reg);
   10454 
   10455 		/* ..and wait for it to be granted. */
   10456 		for (x = 0; x < 1000; x++) {
   10457 			reg = CSR_READ(sc, WMREG_EECD);
   10458 			if (reg & EECD_EE_GNT)
   10459 				break;
   10460 			delay(5);
   10461 		}
   10462 		if ((reg & EECD_EE_GNT) == 0) {
   10463 			aprint_error_dev(sc->sc_dev,
   10464 			    "could not acquire EEPROM GNT\n");
   10465 			reg &= ~EECD_EE_REQ;
   10466 			CSR_WRITE(sc, WMREG_EECD, reg);
   10467 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10468 				wm_put_swfwhw_semaphore(sc);
   10469 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10470 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10471 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10472 				wm_put_swsm_semaphore(sc);
   10473 			return 1;
   10474 		}
   10475 	}
   10476 
   10477 	return 0;
   10478 }
   10479 
   10480 /*
   10481  * wm_nvm_release:
   10482  *
   10483  *	Release the EEPROM mutex.
   10484  */
   10485 static void
   10486 wm_nvm_release(struct wm_softc *sc)
   10487 {
   10488 	uint32_t reg;
   10489 
	/* Flash-type NVM took no lock; nothing to release */
   10491 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10492 		return;
   10493 
   10494 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10495 		reg = CSR_READ(sc, WMREG_EECD);
   10496 		reg &= ~EECD_EE_REQ;
   10497 		CSR_WRITE(sc, WMREG_EECD, reg);
   10498 	}
   10499 
   10500 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10501 		wm_put_swfwhw_semaphore(sc);
   10502 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10503 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10504 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10505 		wm_put_swsm_semaphore(sc);
   10506 }
   10507 
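/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 0 if the NVM of an 82573/82574/82583 is an external flash,
 *	otherwise 1 (onboard EEPROM).
 */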
   10508 static int
   10509 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10510 {
   10511 	uint32_t eecd = 0;
   10512 
   10513 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10514 	    || sc->sc_type == WM_T_82583) {
   10515 		eecd = CSR_READ(sc, WMREG_EECD);
   10516 
   10517 		/* Isolate bits 15 & 16 */
   10518 		eecd = ((eecd >> 15) & 0x03);
   10519 
   10520 		/* If both bits are set, device is Flash type */
   10521 		if (eecd == 0x03)
   10522 			return 0;
   10523 	}
   10524 	return 1;
   10525 }
   10526 
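/*
 * wm_nvm_get_flash_presence_i210:
 *
 *	Return 1 if an external flash is attached to an I210/I211,
 *	otherwise 0 (iNVM only).
 */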
   10527 static int
   10528 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10529 {
   10530 	uint32_t eec;
   10531 
   10532 	eec = CSR_READ(sc, WMREG_EEC);
   10533 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10534 		return 1;
   10535 
   10536 	return 0;
   10537 }
   10538 
/*
 * wm_nvm_validate_checksum:
 *
 *	The checksum is defined so that the sum of the first 64 (16 bit)
 *	words equals NVM_CHECKSUM.
 */
   10544 static int
   10545 wm_nvm_validate_checksum(struct wm_softc *sc)
   10546 {
   10547 	uint16_t checksum;
   10548 	uint16_t eeprom_data;
   10549 #ifdef WM_DEBUG
   10550 	uint16_t csum_wordaddr, valid_checksum;
   10551 #endif
   10552 	int i;
   10553 
   10554 	checksum = 0;
   10555 
   10556 	/* Don't check for I211 */
   10557 	if (sc->sc_type == WM_T_I211)
   10558 		return 0;
   10559 
   10560 #ifdef WM_DEBUG
   10561 	if (sc->sc_type == WM_T_PCH_LPT) {
   10562 		csum_wordaddr = NVM_OFF_COMPAT;
   10563 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10564 	} else {
   10565 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10566 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10567 	}
   10568 
   10569 	/* Dump EEPROM image for debug */
   10570 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10571 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10572 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10573 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10574 		if ((eeprom_data & valid_checksum) == 0) {
   10575 			DPRINTF(WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   10577 				device_xname(sc->sc_dev), eeprom_data,
   10578 				    valid_checksum));
   10579 		}
   10580 	}
   10581 
   10582 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10583 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10584 		for (i = 0; i < NVM_SIZE; i++) {
   10585 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10586 				printf("XXXX ");
   10587 			else
   10588 				printf("%04hx ", eeprom_data);
   10589 			if (i % 8 == 7)
   10590 				printf("\n");
   10591 		}
   10592 	}
   10593 
   10594 #endif /* WM_DEBUG */
   10595 
   10596 	for (i = 0; i < NVM_SIZE; i++) {
   10597 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10598 			return 1;
   10599 		checksum += eeprom_data;
   10600 	}
   10601 
   10602 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10603 #ifdef WM_DEBUG
   10604 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10605 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10606 #endif
   10607 	}
   10608 
   10609 	return 0;
   10610 }
   10611 
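/*
 * wm_nvm_version_invm:
 *
 *	Get the iNVM image version from word 61.
 */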
   10612 static void
   10613 wm_nvm_version_invm(struct wm_softc *sc)
   10614 {
   10615 	uint32_t dword;
   10616 
   10617 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes.  Perhaps it's not perfect though...
   10621 	 *
   10622 	 * Example:
   10623 	 *
   10624 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10625 	 */
   10626 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10627 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10628 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10629 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10630 }
   10631 
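/*
 * wm_nvm_version:
 *
 *	Decode and print the NVM image version, the option ROM version
 *	and the image unique ID.
 */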
   10632 static void
   10633 wm_nvm_version(struct wm_softc *sc)
   10634 {
   10635 	uint16_t major, minor, build, patch;
   10636 	uint16_t uid0, uid1;
   10637 	uint16_t nvm_data;
   10638 	uint16_t off;
   10639 	bool check_version = false;
   10640 	bool check_optionrom = false;
   10641 	bool have_build = false;
   10642 
   10643 	/*
   10644 	 * Version format:
   10645 	 *
   10646 	 * XYYZ
   10647 	 * X0YZ
   10648 	 * X0YY
   10649 	 *
   10650 	 * Example:
   10651 	 *
   10652 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10653 	 *	82571	0x50a6	5.10.6?
   10654 	 *	82572	0x506a	5.6.10?
   10655 	 *	82572EI	0x5069	5.6.9?
   10656 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10657 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10659 	 */
   10660 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10661 	switch (sc->sc_type) {
   10662 	case WM_T_82571:
   10663 	case WM_T_82572:
   10664 	case WM_T_82574:
   10665 	case WM_T_82583:
   10666 		check_version = true;
   10667 		check_optionrom = true;
   10668 		have_build = true;
   10669 		break;
   10670 	case WM_T_82575:
   10671 	case WM_T_82576:
   10672 	case WM_T_82580:
   10673 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10674 			check_version = true;
   10675 		break;
   10676 	case WM_T_I211:
   10677 		wm_nvm_version_invm(sc);
   10678 		goto printver;
   10679 	case WM_T_I210:
   10680 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10681 			wm_nvm_version_invm(sc);
   10682 			goto printver;
   10683 		}
   10684 		/* FALLTHROUGH */
   10685 	case WM_T_I350:
   10686 	case WM_T_I354:
   10687 		check_version = true;
   10688 		check_optionrom = true;
   10689 		break;
   10690 	default:
   10691 		return;
   10692 	}
   10693 	if (check_version) {
   10694 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10695 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10696 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10697 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10698 			build = nvm_data & NVM_BUILD_MASK;
   10699 			have_build = true;
   10700 		} else
   10701 			minor = nvm_data & 0x00ff;
   10702 
		/* Convert the BCD-encoded minor to decimal */
   10704 		minor = (minor / 16) * 10 + (minor % 16);
   10705 		sc->sc_nvm_ver_major = major;
   10706 		sc->sc_nvm_ver_minor = minor;
   10707 
   10708 printver:
   10709 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10710 		    sc->sc_nvm_ver_minor);
   10711 		if (have_build) {
   10712 			sc->sc_nvm_ver_build = build;
   10713 			aprint_verbose(".%d", build);
   10714 		}
   10715 	}
   10716 	if (check_optionrom) {
   10717 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10718 		/* Option ROM Version */
   10719 		if ((off != 0x0000) && (off != 0xffff)) {
   10720 			off += NVM_COMBO_VER_OFF;
   10721 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10722 			wm_nvm_read(sc, off, 1, &uid0);
   10723 			if ((uid0 != 0) && (uid0 != 0xffff)
   10724 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10725 				/* 16bits */
   10726 				major = uid0 >> 8;
   10727 				build = (uid0 << 8) | (uid1 >> 8);
   10728 				patch = uid1 & 0x00ff;
   10729 				aprint_verbose(", option ROM Version %d.%d.%d",
   10730 				    major, build, patch);
   10731 			}
   10732 		}
   10733 	}
   10734 
   10735 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10736 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10737 }
   10738 
   10739 /*
   10740  * wm_nvm_read:
   10741  *
   10742  *	Read data from the serial EEPROM.
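 *
 *	The NVM lock is taken around the access, and the read is
 *	dispatched to the ICH8 flash, iNVM, EERD, SPI or MicroWire code
 *	depending on the chip type and flags.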
   10743  */
   10744 static int
   10745 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10746 {
   10747 	int rv;
   10748 
   10749 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10750 		return 1;
   10751 
   10752 	if (wm_nvm_acquire(sc))
   10753 		return 1;
   10754 
   10755 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10756 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10757 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   10758 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   10759 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   10760 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   10761 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   10762 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   10763 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   10764 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   10765 	else
   10766 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   10767 
   10768 	wm_nvm_release(sc);
   10769 	return rv;
   10770 }
   10771 
   10772 /*
   10773  * Hardware semaphores.
 * Very complex...
   10775  */
   10776 
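/*
 * wm_get_swsm_semaphore:
 *
 *	Take the SWSM semaphore: first the software SMBI bit, then the
 *	software/firmware SWESMBI bit.  Returns 0 on success and 1 on
 *	timeout.
 */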
   10777 static int
   10778 wm_get_swsm_semaphore(struct wm_softc *sc)
   10779 {
   10780 	int32_t timeout;
   10781 	uint32_t swsm;
   10782 
   10783 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10784 		/* Get the SW semaphore. */
   10785 		timeout = sc->sc_nvm_wordsize + 1;
   10786 		while (timeout) {
   10787 			swsm = CSR_READ(sc, WMREG_SWSM);
   10788 
   10789 			if ((swsm & SWSM_SMBI) == 0)
   10790 				break;
   10791 
   10792 			delay(50);
   10793 			timeout--;
   10794 		}
   10795 
   10796 		if (timeout == 0) {
   10797 			aprint_error_dev(sc->sc_dev,
   10798 			    "could not acquire SWSM SMBI\n");
   10799 			return 1;
   10800 		}
   10801 	}
   10802 
   10803 	/* Get the FW semaphore. */
   10804 	timeout = sc->sc_nvm_wordsize + 1;
   10805 	while (timeout) {
   10806 		swsm = CSR_READ(sc, WMREG_SWSM);
   10807 		swsm |= SWSM_SWESMBI;
   10808 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   10809 		/* If we managed to set the bit we got the semaphore. */
   10810 		swsm = CSR_READ(sc, WMREG_SWSM);
   10811 		if (swsm & SWSM_SWESMBI)
   10812 			break;
   10813 
   10814 		delay(50);
   10815 		timeout--;
   10816 	}
   10817 
   10818 	if (timeout == 0) {
   10819 		aprint_error_dev(sc->sc_dev,
   10820 		    "could not acquire SWSM SWESMBI\n");
   10821 		/* Release semaphores */
   10822 		wm_put_swsm_semaphore(sc);
   10823 		return 1;
   10824 	}
   10825 	return 0;
   10826 }
   10827 
   10828 static void
   10829 wm_put_swsm_semaphore(struct wm_softc *sc)
   10830 {
   10831 	uint32_t swsm;
   10832 
   10833 	swsm = CSR_READ(sc, WMREG_SWSM);
   10834 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   10835 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   10836 }
   10837 
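/*
 * wm_get_swfw_semaphore:
 *
 *	Take the software side of the SW_FW_SYNC semaphore for the
 *	resources in "mask", retrying for up to a second while software
 *	or firmware holds them.  Returns 0 on success and 1 on timeout.
 */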
   10838 static int
   10839 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10840 {
   10841 	uint32_t swfw_sync;
   10842 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   10843 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
   10845 
   10846 	for (timeout = 0; timeout < 200; timeout++) {
   10847 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10848 			if (wm_get_swsm_semaphore(sc)) {
   10849 				aprint_error_dev(sc->sc_dev,
   10850 				    "%s: failed to get semaphore\n",
   10851 				    __func__);
   10852 				return 1;
   10853 			}
   10854 		}
   10855 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10856 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   10857 			swfw_sync |= swmask;
   10858 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10859 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   10860 				wm_put_swsm_semaphore(sc);
   10861 			return 0;
   10862 		}
   10863 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   10864 			wm_put_swsm_semaphore(sc);
   10865 		delay(5000);
   10866 	}
   10867 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   10868 	    device_xname(sc->sc_dev), mask, swfw_sync);
   10869 	return 1;
   10870 }
   10871 
   10872 static void
   10873 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10874 {
   10875 	uint32_t swfw_sync;
   10876 
   10877 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10878 		while (wm_get_swsm_semaphore(sc) != 0)
   10879 			continue;
   10880 	}
   10881 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10882 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   10883 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10884 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   10885 		wm_put_swsm_semaphore(sc);
   10886 }
   10887 
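/*
 * wm_get_swfwhw_semaphore:
 *
 *	Take MDIO ownership through the EXTCNFCTR register, retrying for
 *	up to a second.  Returns 0 on success and 1 on timeout.
 */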
   10888 static int
   10889 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   10890 {
   10891 	uint32_t ext_ctrl;
	int timeout;
   10893 
   10894 	for (timeout = 0; timeout < 200; timeout++) {
   10895 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10896 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10897 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10898 
   10899 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10900 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   10901 			return 0;
   10902 		delay(5000);
   10903 	}
   10904 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   10905 	    device_xname(sc->sc_dev), ext_ctrl);
   10906 	return 1;
   10907 }
   10908 
   10909 static void
   10910 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   10911 {
   10912 	uint32_t ext_ctrl;
   10913 
   10914 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10915 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10916 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10917 }
   10918 
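/*
 * wm_get_hw_semaphore_82573:
 *
 *	Take MDIO ownership through the EXTCNFCTR register on 82573 class
 *	devices; log an error and return -1 if the PHY stays inaccessible.
 */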
   10919 static int
   10920 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   10921 {
   10922 	int i = 0;
   10923 	uint32_t reg;
   10924 
   10925 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10926 	do {
   10927 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   10928 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   10929 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10930 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   10931 			break;
   10932 		delay(2*1000);
   10933 		i++;
   10934 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   10935 
   10936 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   10937 		wm_put_hw_semaphore_82573(sc);
   10938 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   10939 		    device_xname(sc->sc_dev));
   10940 		return -1;
   10941 	}
   10942 
   10943 	return 0;
   10944 }
   10945 
   10946 static void
   10947 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   10948 {
   10949 	uint32_t reg;
   10950 
   10951 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10952 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10953 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   10954 }
   10955 
   10956 /*
   10957  * Management mode and power management related subroutines.
   10958  * BMC, AMT, suspend/resume and EEE.
   10959  */
   10960 
   10961 #ifdef WM_WOL
   10962 static int
   10963 wm_check_mng_mode(struct wm_softc *sc)
   10964 {
   10965 	int rv;
   10966 
   10967 	switch (sc->sc_type) {
   10968 	case WM_T_ICH8:
   10969 	case WM_T_ICH9:
   10970 	case WM_T_ICH10:
   10971 	case WM_T_PCH:
   10972 	case WM_T_PCH2:
   10973 	case WM_T_PCH_LPT:
   10974 		rv = wm_check_mng_mode_ich8lan(sc);
   10975 		break;
   10976 	case WM_T_82574:
   10977 	case WM_T_82583:
   10978 		rv = wm_check_mng_mode_82574(sc);
   10979 		break;
   10980 	case WM_T_82571:
   10981 	case WM_T_82572:
   10982 	case WM_T_82573:
   10983 	case WM_T_80003:
   10984 		rv = wm_check_mng_mode_generic(sc);
   10985 		break;
   10986 	default:
		/* nothing to do */
   10988 		rv = 0;
   10989 		break;
   10990 	}
   10991 
   10992 	return rv;
   10993 }
   10994 
   10995 static int
   10996 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   10997 {
   10998 	uint32_t fwsm;
   10999 
   11000 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11001 
   11002 	if (((fwsm & FWSM_FW_VALID) != 0)
   11003 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11004 		return 1;
   11005 
   11006 	return 0;
   11007 }
   11008 
   11009 static int
   11010 wm_check_mng_mode_82574(struct wm_softc *sc)
   11011 {
   11012 	uint16_t data;
   11013 
   11014 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11015 
   11016 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11017 		return 1;
   11018 
   11019 	return 0;
   11020 }
   11021 
   11022 static int
   11023 wm_check_mng_mode_generic(struct wm_softc *sc)
   11024 {
   11025 	uint32_t fwsm;
   11026 
   11027 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11028 
   11029 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11030 		return 1;
   11031 
   11032 	return 0;
   11033 }
   11034 #endif /* WM_WOL */
   11035 
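/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if the firmware's management pass through (to the BMC)
 *	is enabled, in which case the interface must keep receiving
 *	management packets; used below to set WM_F_HAS_MANAGE.
 */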
   11036 static int
   11037 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11038 {
   11039 	uint32_t manc, fwsm, factps;
   11040 
   11041 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11042 		return 0;
   11043 
   11044 	manc = CSR_READ(sc, WMREG_MANC);
   11045 
   11046 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11047 		device_xname(sc->sc_dev), manc));
   11048 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11049 		return 0;
   11050 
   11051 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11052 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11053 		factps = CSR_READ(sc, WMREG_FACTPS);
   11054 		if (((factps & FACTPS_MNGCG) == 0)
   11055 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11056 			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   11058 		uint16_t data;
   11059 
   11060 		factps = CSR_READ(sc, WMREG_FACTPS);
   11061 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11062 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11063 			device_xname(sc->sc_dev), factps, data));
   11064 		if (((factps & FACTPS_MNGCG) == 0)
   11065 		    && ((data & NVM_CFG2_MNGM_MASK)
   11066 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11067 			return 1;
   11068 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11069 	    && ((manc & MANC_ASF_EN) == 0))
   11070 		return 1;
   11071 
   11072 	return 0;
   11073 }
   11074 
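/*
 * wm_phy_resetisblocked:
 *
 *	Check whether the firmware currently blocks a PHY reset.
 */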
   11075 static bool
   11076 wm_phy_resetisblocked(struct wm_softc *sc)
   11077 {
   11078 	bool blocked = false;
   11079 	uint32_t reg;
   11080 	int i = 0;
   11081 
   11082 	switch (sc->sc_type) {
   11083 	case WM_T_ICH8:
   11084 	case WM_T_ICH9:
   11085 	case WM_T_ICH10:
   11086 	case WM_T_PCH:
   11087 	case WM_T_PCH2:
   11088 	case WM_T_PCH_LPT:
   11089 		do {
   11090 			reg = CSR_READ(sc, WMREG_FWSM);
   11091 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11092 				blocked = true;
   11093 				delay(10*1000);
   11094 				continue;
   11095 			}
   11096 			blocked = false;
   11097 		} while (blocked && (i++ < 10));
		return blocked;
   11100 	case WM_T_82571:
   11101 	case WM_T_82572:
   11102 	case WM_T_82573:
   11103 	case WM_T_82574:
   11104 	case WM_T_82583:
   11105 	case WM_T_80003:
   11106 		reg = CSR_READ(sc, WMREG_MANC);
		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   11112 	default:
   11113 		/* no problem */
   11114 		break;
   11115 	}
   11116 
   11117 	return false;
   11118 }
   11119 
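/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver has taken over the device by
 *	setting the DRV_LOAD bit (in SWSM on 82573, in CTRL_EXT
 *	elsewhere).
 */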
   11120 static void
   11121 wm_get_hw_control(struct wm_softc *sc)
   11122 {
   11123 	uint32_t reg;
   11124 
   11125 	switch (sc->sc_type) {
   11126 	case WM_T_82573:
   11127 		reg = CSR_READ(sc, WMREG_SWSM);
   11128 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11129 		break;
   11130 	case WM_T_82571:
   11131 	case WM_T_82572:
   11132 	case WM_T_82574:
   11133 	case WM_T_82583:
   11134 	case WM_T_80003:
   11135 	case WM_T_ICH8:
   11136 	case WM_T_ICH9:
   11137 	case WM_T_ICH10:
   11138 	case WM_T_PCH:
   11139 	case WM_T_PCH2:
   11140 	case WM_T_PCH_LPT:
   11141 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11142 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11143 		break;
   11144 	default:
   11145 		break;
   11146 	}
   11147 }
   11148 
   11149 static void
   11150 wm_release_hw_control(struct wm_softc *sc)
   11151 {
   11152 	uint32_t reg;
   11153 
   11154 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11155 		return;
   11156 
   11157 	if (sc->sc_type == WM_T_82573) {
   11158 		reg = CSR_READ(sc, WMREG_SWSM);
		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
   11161 	} else {
   11162 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11163 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11164 	}
   11165 }
   11166 
   11167 static void
   11168 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   11169 {
   11170 	uint32_t reg;
   11171 
   11172 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11173 
   11174 	if (on != 0)
   11175 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11176 	else
   11177 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11178 
   11179 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11180 }
   11181 
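/*
 * wm_smbustopci:
 *
 *	Toggle LANPHYPC to force the PHY's interface from SMBus mode back
 *	to PCIe, unless valid firmware is present or a PHY reset is
 *	blocked.
 */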
   11182 static void
   11183 wm_smbustopci(struct wm_softc *sc)
   11184 {
   11185 	uint32_t fwsm;
   11186 
   11187 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11188 	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_phy_resetisblocked(sc) == false)) {
   11190 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11191 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11192 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11193 		CSR_WRITE_FLUSH(sc);
   11194 		delay(10);
   11195 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11196 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11197 		CSR_WRITE_FLUSH(sc);
   11198 		delay(50*1000);
   11199 
   11200 		/*
   11201 		 * Gate automatic PHY configuration by hardware on non-managed
   11202 		 * 82579
   11203 		 */
   11204 		if (sc->sc_type == WM_T_PCH2)
   11205 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   11206 	}
   11207 }
   11208 
   11209 static void
   11210 wm_init_manageability(struct wm_softc *sc)
   11211 {
   11212 
   11213 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11214 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11215 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11216 
   11217 		/* Disable hardware interception of ARP */
   11218 		manc &= ~MANC_ARP_EN;
   11219 
   11220 		/* Enable receiving management packets to the host */
   11221 		if (sc->sc_type >= WM_T_82571) {
   11222 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11224 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11225 		}
   11226 
   11227 		CSR_WRITE(sc, WMREG_MANC, manc);
   11228 	}
   11229 }
   11230 
   11231 static void
   11232 wm_release_manageability(struct wm_softc *sc)
   11233 {
   11234 
   11235 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11236 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11237 
   11238 		manc |= MANC_ARP_EN;
   11239 		if (sc->sc_type >= WM_T_82571)
   11240 			manc &= ~MANC_EN_MNG2HOST;
   11241 
   11242 		CSR_WRITE(sc, WMREG_MANC, manc);
   11243 	}
   11244 }
   11245 
   11246 static void
   11247 wm_get_wakeup(struct wm_softc *sc)
   11248 {
   11249 
   11250 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11251 	switch (sc->sc_type) {
   11252 	case WM_T_82573:
   11253 	case WM_T_82583:
   11254 		sc->sc_flags |= WM_F_HAS_AMT;
   11255 		/* FALLTHROUGH */
   11256 	case WM_T_80003:
   11257 	case WM_T_82541:
   11258 	case WM_T_82547:
   11259 	case WM_T_82571:
   11260 	case WM_T_82572:
   11261 	case WM_T_82574:
   11262 	case WM_T_82575:
   11263 	case WM_T_82576:
   11264 	case WM_T_82580:
   11265 	case WM_T_I350:
   11266 	case WM_T_I354:
   11267 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   11268 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11269 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11270 		break;
   11271 	case WM_T_ICH8:
   11272 	case WM_T_ICH9:
   11273 	case WM_T_ICH10:
   11274 	case WM_T_PCH:
   11275 	case WM_T_PCH2:
   11276 	case WM_T_PCH_LPT:
   11277 		sc->sc_flags |= WM_F_HAS_AMT;
   11278 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11279 		break;
   11280 	default:
   11281 		break;
   11282 	}
   11283 
   11284 	/* 1: HAS_MANAGE */
   11285 	if (wm_enable_mng_pass_thru(sc) != 0)
   11286 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11287 
   11288 #ifdef WM_DEBUG
   11289 	printf("\n");
   11290 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11291 		printf("HAS_AMT,");
   11292 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11293 		printf("ARC_SUBSYS_VALID,");
   11294 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11295 		printf("ASF_FIRMWARE_PRES,");
   11296 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11297 		printf("HAS_MANAGE,");
   11298 	printf("\n");
   11299 #endif
	/*
	 * Note that the WOL flag is set after the EEPROM stuff has been
	 * reset.
	 */
   11304 }
   11305 
   11306 #ifdef WM_WOL
   11307 /* WOL in the newer chipset interfaces (pchlan) */
   11308 static void
   11309 wm_enable_phy_wakeup(struct wm_softc *sc)
   11310 {
   11311 #if 0
   11312 	uint16_t preg;
   11313 
   11314 	/* Copy MAC RARs to PHY RARs */
   11315 
   11316 	/* Copy MAC MTA to PHY MTA */
   11317 
   11318 	/* Configure PHY Rx Control register */
   11319 
   11320 	/* Enable PHY wakeup in MAC register */
   11321 
   11322 	/* Configure and enable PHY wakeup in PHY registers */
   11323 
   11324 	/* Activate PHY wakeup */
   11325 
   11326 	/* XXX */
   11327 #endif
   11328 }
   11329 
   11330 /* Power down workaround on D3 */
   11331 static void
   11332 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11333 {
   11334 	uint32_t reg;
   11335 	int i;
   11336 
   11337 	for (i = 0; i < 2; i++) {
   11338 		/* Disable link */
   11339 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11340 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11341 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11342 
   11343 		/*
   11344 		 * Call gig speed drop workaround on Gig disable before
   11345 		 * accessing any PHY registers
   11346 		 */
   11347 		if (sc->sc_type == WM_T_ICH8)
   11348 			wm_gig_downshift_workaround_ich8lan(sc);
   11349 
   11350 		/* Write VR power-down enable */
   11351 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11352 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11353 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11354 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11355 
   11356 		/* Read it back and test */
   11357 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11358 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11359 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11360 			break;
   11361 
   11362 		/* Issue PHY reset and repeat at most one more time */
   11363 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11364 	}
   11365 }
   11366 
   11367 static void
   11368 wm_enable_wakeup(struct wm_softc *sc)
   11369 {
   11370 	uint32_t reg, pmreg;
   11371 	pcireg_t pmode;
   11372 
   11373 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11374 		&pmreg, NULL) == 0)
   11375 		return;
   11376 
   11377 	/* Advertise the wakeup capability */
   11378 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11379 	    | CTRL_SWDPIN(3));
   11380 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11381 
   11382 	/* ICH workaround */
   11383 	switch (sc->sc_type) {
   11384 	case WM_T_ICH8:
   11385 	case WM_T_ICH9:
   11386 	case WM_T_ICH10:
   11387 	case WM_T_PCH:
   11388 	case WM_T_PCH2:
   11389 	case WM_T_PCH_LPT:
   11390 		/* Disable gig during WOL */
   11391 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11392 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11393 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11394 		if (sc->sc_type == WM_T_PCH)
   11395 			wm_gmii_reset(sc);
   11396 
   11397 		/* Power down workaround */
   11398 		if (sc->sc_phytype == WMPHY_82577) {
   11399 			struct mii_softc *child;
   11400 
   11401 			/* Assume that the PHY is copper */
   11402 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11403 			if (child->mii_mpd_rev <= 2)
   11404 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11405 				    (768 << 5) | 25, 0x0444); /* magic num */
   11406 		}
   11407 		break;
   11408 	default:
   11409 		break;
   11410 	}
   11411 
   11412 	/* Keep the laser running on fiber adapters */
   11413 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11414 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11415 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11416 		reg |= CTRL_EXT_SWDPIN(3);
   11417 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11418 	}
   11419 
   11420 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11421 #if 0	/* for the multicast packet */
   11422 	reg |= WUFC_MC;
   11423 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11424 #endif
   11425 
   11426 	if (sc->sc_type == WM_T_PCH) {
   11427 		wm_enable_phy_wakeup(sc);
   11428 	} else {
   11429 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11430 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11431 	}
   11432 
   11433 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11434 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11435 		|| (sc->sc_type == WM_T_PCH2))
   11436 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11437 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11438 
   11439 	/* Request PME */
   11440 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11441 #if 0
   11442 	/* Disable WOL */
   11443 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11444 #else
   11445 	/* For WOL */
   11446 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11447 #endif
   11448 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11449 }
   11450 #endif /* WM_WOL */
   11451 
   11452 /* LPLU */
   11453 
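/*
 * wm_lplu_d0_disable:
 *
 *	Disable D0 Low Power Link Up mode (and the gigabit disable bit)
 *	through the PHY_CTRL register.
 */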
   11454 static void
   11455 wm_lplu_d0_disable(struct wm_softc *sc)
   11456 {
   11457 	uint32_t reg;
   11458 
   11459 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11460 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   11461 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11462 }
   11463 
   11464 static void
   11465 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   11466 {
   11467 	uint32_t reg;
   11468 
   11469 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   11470 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   11471 	reg |= HV_OEM_BITS_ANEGNOW;
   11472 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   11473 }
   11474 
   11475 /* EEE */
   11476 
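/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet on I350 class devices
 *	according to the WM_F_EEE flag.
 */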
   11477 static void
   11478 wm_set_eee_i350(struct wm_softc *sc)
   11479 {
   11480 	uint32_t ipcnfg, eeer;
   11481 
   11482 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11483 	eeer = CSR_READ(sc, WMREG_EEER);
   11484 
   11485 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11486 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11487 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11488 		    | EEER_LPI_FC);
   11489 	} else {
   11490 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11491 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11492 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11493 		    | EEER_LPI_FC);
   11494 	}
   11495 
   11496 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11497 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11498 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11499 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11500 }
   11501 
   11502 /*
   11503  * Workarounds (mainly PHY related).
   11504  * Basically, PHY's workarounds are in the PHY drivers.
   11505  */
   11506 
   11507 /* Work-around for 82566 Kumeran PCS lock loss */
   11508 static void
   11509 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11510 {
   11511 #if 0
   11512 	int miistatus, active, i;
   11513 	int reg;
   11514 
   11515 	miistatus = sc->sc_mii.mii_media_status;
   11516 
   11517 	/* If the link is not up, do nothing */
   11518 	if ((miistatus & IFM_ACTIVE) == 0)
   11519 		return;
   11520 
   11521 	active = sc->sc_mii.mii_media_active;
   11522 
   11523 	/* Nothing to do if the link is other than 1Gbps */
   11524 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11525 		return;
   11526 
   11527 	for (i = 0; i < 10; i++) {
   11528 		/* read twice */
   11529 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11530 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11531 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11532 			goto out;	/* GOOD! */
   11533 
   11534 		/* Reset the PHY */
   11535 		wm_gmii_reset(sc);
   11536 		delay(5*1000);
   11537 	}
   11538 
   11539 	/* Disable GigE link negotiation */
   11540 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11541 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11542 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11543 
   11544 	/*
   11545 	 * Call gig speed drop workaround on Gig disable before accessing
   11546 	 * any PHY registers.
   11547 	 */
   11548 	wm_gig_downshift_workaround_ich8lan(sc);
   11549 
   11550 out:
   11551 	return;
   11552 #endif
   11553 }

/*
 * Work-around for the "WOL from S5 stops working" issue: pulse the
 * Kumeran near-end loopback bit when gig speed is dropped.
 */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}
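
/*
 * Illustrative sketch (deliberately not compiled): the set-then-clear
 * "pulse" pattern used above, factored into a generic helper.  The
 * helper name is hypothetical.
 */
#if 0
static void
wm_kmrn_pulse_bit(struct wm_softc *sc, int offset, uint16_t bit)
{
	uint16_t reg;

	reg = wm_kmrn_readreg(sc, offset);
	wm_kmrn_writereg(sc, offset, reg | bit);	/* assert */
	wm_kmrn_writereg(sc, offset, reg & ~bit);	/* deassert */
}
#endif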

/*
 * Workarounds for the PCH PHYs (82577/82578).
 * XXX should be moved to a new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* XXX unimplemented: (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* XXX unimplemented: (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled while the link is at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}
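
/*
 * Illustrative usage (an assumption, shown for clarity): a link-state
 * change handler would pass the current link state, e.g.
 *
 *	wm_k1_gig_workaround_hv(sc,
 *	    (CSR_READ(sc, WMREG_STATUS) & STATUS_LU) != 0);
 */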

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}
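
/*
 * Note on wm_configure_k1_ich8lan() above (our reading, not from Intel
 * documentation): the CTRL_FRCSPD/CTRL_EXT_SPD_BYPS writes appear to
 * force and then bypass the MAC speed logic just long enough for the
 * Kumeran K1 change to latch, after which the saved CTRL/CTRL_EXT
 * values are restored.
 */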

/* Special case - the 82575 needs manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * It is the same setup as mentioned in the FreeBSD driver for
	 * the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}
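
/*
 * For reference, a sketch of what wm_82575_write_8bit_ctlr_reg() is
 * expected to do, modeled on the generic e1000 "8bit control register"
 * write: pack the byte offset and data into a single register write,
 * then poll for a ready bit.  The shift, ready bit, and poll budget
 * below are assumptions, not the driver's actual constants.
 */
#if 0
static void
wm_82575_write_8bit_ctlr_reg_sketch(struct wm_softc *sc, uint32_t reg,
    uint32_t off, uint32_t data)
{
	uint32_t val = (data & 0xff) | (off << 8);	/* assumed layout */
	int i;

	CSR_WRITE(sc, reg, val);
	for (i = 0; i < 640; i++) {			/* assumed budget */
		delay(5);
		if (CSR_READ(sc, reg) & __BIT(31))	/* assumed ready */
			break;
	}
}
#endif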

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}
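
/*
 * Illustrative sketch (deliberately not compiled): the D3hot "power
 * pulse" from the retry loop above, as a standalone helper.  The name
 * is hypothetical.
 */
#if 0
static void
wm_pmcsr_d3_pulse(struct wm_softc *sc, int pmreg)
{
	pcireg_t pcireg;

	pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR,
	    pcireg | PCI_PMCSR_STATE_D3);	/* drop to D3hot */
	delay(1000);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR,
	    pcireg & ~PCI_PMCSR_STATE_D3);	/* back to D0 */
}
#endif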