      1 /*	$NetBSD: if_wm.c,v 1.384 2015/12/22 02:10:25 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue
     78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.384 2015/12/22 02:10:25 knakahara Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 
    107 #include <sys/rndsource.h>
    108 
    109 #include <net/if.h>
    110 #include <net/if_dl.h>
    111 #include <net/if_media.h>
    112 #include <net/if_ether.h>
    113 
    114 #include <net/bpf.h>
    115 
    116 #include <netinet/in.h>			/* XXX for struct ip */
    117 #include <netinet/in_systm.h>		/* XXX for struct ip */
    118 #include <netinet/ip.h>			/* XXX for struct ip */
    119 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    120 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    121 
    122 #include <sys/bus.h>
    123 #include <sys/intr.h>
    124 #include <machine/endian.h>
    125 
    126 #include <dev/mii/mii.h>
    127 #include <dev/mii/miivar.h>
    128 #include <dev/mii/miidevs.h>
    129 #include <dev/mii/mii_bitbang.h>
    130 #include <dev/mii/ikphyreg.h>
    131 #include <dev/mii/igphyreg.h>
    132 #include <dev/mii/igphyvar.h>
    133 #include <dev/mii/inbmphyreg.h>
    134 
    135 #include <dev/pci/pcireg.h>
    136 #include <dev/pci/pcivar.h>
    137 #include <dev/pci/pcidevs.h>
    138 
    139 #include <dev/pci/if_wmreg.h>
    140 #include <dev/pci/if_wmvar.h>
    141 
    142 #ifdef WM_DEBUG
    143 #define	WM_DEBUG_LINK		0x01
    144 #define	WM_DEBUG_TX		0x02
    145 #define	WM_DEBUG_RX		0x04
    146 #define	WM_DEBUG_GMII		0x08
    147 #define	WM_DEBUG_MANAGE		0x10
    148 #define	WM_DEBUG_NVM		0x20
    149 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    150     | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
    151 
    152 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (/*CONSTCOND*/0)
    153 #else
    154 #define	DPRINTF(x, y)	/* nothing */
    155 #endif /* WM_DEBUG */
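        /*
         * Usage sketch (illustration only, not driver code): DPRINTF() takes
         * a debug class and a fully parenthesized printf() argument list,
         * e.g.
         *
         *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
         *	    device_xname(sc->sc_dev)));
         *
         * When WM_DEBUG is not defined, the macro expands to nothing and the
         * arguments are never evaluated.
         */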
    156 
    157 #ifdef NET_MPSAFE
    158 #define WM_MPSAFE	1
    159 #endif
    160 
    161 /*
    162  * Maximum number of interrupts this driver supports (Tx + Rx + link).
    163  */
    164 #define WM_MAX_NTXINTR		16
    165 #define WM_MAX_NRXINTR		16
    166 #define WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)
    167 
    168 /*
    169  * Transmit descriptor list size.  Due to errata, we can only have
    170  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    171  * on >= 82544.  We tell the upper layers that they can queue a lot
    172  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    173  * of them at a time.
    174  *
    175  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    176  * chains containing many small mbufs have been observed in zero-copy
    177  * situations with jumbo frames.
    178  */
    179 #define	WM_NTXSEGS		256
    180 #define	WM_IFQUEUELEN		256
    181 #define	WM_TXQUEUELEN_MAX	64
    182 #define	WM_TXQUEUELEN_MAX_82547	16
    183 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    184 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    185 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    186 #define	WM_NTXDESC_82542	256
    187 #define	WM_NTXDESC_82544	4096
    188 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    189 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    190 #define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
    191 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    192 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    193 
    194 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
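        /*
         * Because the ring and job-queue sizes above are powers of two,
         * WM_NEXTTX() and WM_NEXTTXS() wrap an index with a mask rather than
         * a modulo.  A hypothetical stand-alone equivalent (illustration
         * only, hence #if 0):
         */
        #if 0
        static inline int
        ring_next(int x, int size)	/* size must be a power of two */
        {

        	return (x + 1) & (size - 1);	/* same as (x + 1) % size */
        }
        #endif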
    195 
    196 /*
    197  * Receive descriptor list size.  We have one Rx buffer for normal
    198  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    199  * packet.  We allocate 256 receive descriptors, each with a 2k
    200  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    201  */
    202 #define	WM_NRXDESC		256
    203 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    204 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    205 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
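        /*
         * The "5 Rx buffers" figure above follows from the 2k (MCLBYTES)
         * buffer size; e.g. a 9018-byte jumbo frame occupies
         * howmany(9018, 2048) == 5 buffers, so 256 descriptors cover roughly
         * 50 such packets.
         */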
    206 
    207 typedef union txdescs {
    208 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    209 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    210 } txdescs_t;
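        /*
         * The union lets the same DMA memory back either the legacy
         * (wiseman) descriptor layout or the newer-queue layout; the
         * txq_descs/txq_nq_descs aliases below select whichever view matches
         * the MAC type.
         */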
    211 
    212 #define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
    213 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    214 
    215 /*
    216  * Software state for transmit jobs.
    217  */
    218 struct wm_txsoft {
    219 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    220 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    221 	int txs_firstdesc;		/* first descriptor in packet */
    222 	int txs_lastdesc;		/* last descriptor in packet */
    223 	int txs_ndesc;			/* # of descriptors used */
    224 };
    225 
    226 /*
    227  * Software state for receive buffers.  Each descriptor gets a
    228  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    229  * more than one buffer, we chain them together.
    230  */
    231 struct wm_rxsoft {
    232 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    233 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    234 };
    235 
    236 #define WM_LINKUP_TIMEOUT	50
    237 
    238 static uint16_t swfwphysem[] = {
    239 	SWFW_PHY0_SM,
    240 	SWFW_PHY1_SM,
    241 	SWFW_PHY2_SM,
    242 	SWFW_PHY3_SM
    243 };
    244 
    245 static const uint32_t wm_82580_rxpbs_table[] = {
    246 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    247 };
    248 
    249 struct wm_softc;
    250 
    251 struct wm_txqueue {
    252 	kmutex_t *txq_lock;		/* lock for tx operations */
    253 
    254 	struct wm_softc *txq_sc;
    255 
    256 	int txq_id;			/* index of this transmit queue */
    257 	int txq_intr_idx;		/* MSI-X table index */
    258 
    259 	/* Software state for the transmit descriptors. */
    260 	int txq_num;			/* must be a power of two */
    261 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    262 
    263 	/* TX control data structures. */
    264 	int txq_ndesc;			/* must be a power of two */
    265 	txdescs_t *txq_descs_u;
    266 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    267 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    268 	int txq_desc_rseg;		/* real number of control segments */
    269 	size_t txq_desc_size;		/* control data size */
    270 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    271 #define	txq_descs	txq_descs_u->sctxu_txdescs
    272 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    273 
    274 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    275 
    276 	int txq_free;			/* number of free Tx descriptors */
    277 	int txq_next;			/* next ready Tx descriptor */
    278 
    279 	int txq_sfree;			/* number of free Tx jobs */
    280 	int txq_snext;			/* next free Tx job */
    281 	int txq_sdirty;			/* dirty Tx jobs */
    282 
    283 	/* These 4 variables are used only on the 82547. */
    284 	int txq_fifo_size;		/* Tx FIFO size */
    285 	int txq_fifo_head;		/* current head of FIFO */
    286 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    287 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    288 
    289 	/* XXX which event counter is required? */
    290 };
    291 
    292 struct wm_rxqueue {
    293 	kmutex_t *rxq_lock;		/* lock for rx operations */
    294 
    295 	struct wm_softc *rxq_sc;
    296 
    297 	int rxq_id;			/* index of this receive queue */
    298 	int rxq_intr_idx;		/* MSI-X table index */
    299 
    300 	/* Software state for the receive descriptors. */
    301 	wiseman_rxdesc_t *rxq_descs;
    302 
    303 	/* RX control data structures. */
    304 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    305 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    306 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    307 	int rxq_desc_rseg;		/* real number of control segments */
    308 	size_t rxq_desc_size;		/* control data size */
    309 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    310 
    311 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    312 
    313 	int rxq_ptr;			/* next ready Rx descriptor/queue entry */
    314 	int rxq_discard;
    315 	int rxq_len;
    316 	struct mbuf *rxq_head;
    317 	struct mbuf *rxq_tail;
    318 	struct mbuf **rxq_tailp;
    319 
    320 	/* XXX which event counter is required? */
    321 };
    322 
    323 /*
    324  * Software state per device.
    325  */
    326 struct wm_softc {
    327 	device_t sc_dev;		/* generic device information */
    328 	bus_space_tag_t sc_st;		/* bus space tag */
    329 	bus_space_handle_t sc_sh;	/* bus space handle */
    330 	bus_size_t sc_ss;		/* bus space size */
    331 	bus_space_tag_t sc_iot;		/* I/O space tag */
    332 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    333 	bus_size_t sc_ios;		/* I/O space size */
    334 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    335 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    336 	bus_size_t sc_flashs;		/* flash registers space size */
    337 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    338 
    339 	struct ethercom sc_ethercom;	/* ethernet common data */
    340 	struct mii_data sc_mii;		/* MII/media information */
    341 
    342 	pci_chipset_tag_t sc_pc;
    343 	pcitag_t sc_pcitag;
    344 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    345 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    346 
    347 	uint16_t sc_pcidevid;		/* PCI device ID */
    348 	wm_chip_type sc_type;		/* MAC type */
    349 	int sc_rev;			/* MAC revision */
    350 	wm_phy_type sc_phytype;		/* PHY type */
    351 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
    352 #define	WM_MEDIATYPE_UNKNOWN		0x00
    353 #define	WM_MEDIATYPE_FIBER		0x01
    354 #define	WM_MEDIATYPE_COPPER		0x02
    355 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    356 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    357 	int sc_flags;			/* flags; see below */
    358 	int sc_if_flags;		/* last if_flags */
    359 	int sc_flowflags;		/* 802.3x flow control flags */
    360 	int sc_align_tweak;
    361 
    362 	void *sc_ihs[WM_MAX_NINTR];	/*
    363 					 * interrupt cookie.
    364 					 * legacy and msi use sc_ihs[0].
    365 					 */
    366 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    367 	int sc_nintrs;			/* number of interrupts */
    368 
    369 	int sc_link_intr_idx;		/* index of MSI-X tables */
    370 
    371 	callout_t sc_tick_ch;		/* tick callout */
    372 	bool sc_stopping;
    373 
    374 	int sc_nvm_ver_major;
    375 	int sc_nvm_ver_minor;
    376 	int sc_nvm_ver_build;
    377 	int sc_nvm_addrbits;		/* NVM address bits */
    378 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    379 	int sc_ich8_flash_base;
    380 	int sc_ich8_flash_bank_size;
    381 	int sc_nvm_k1_enabled;
    382 
    383 	int sc_ntxqueues;
    384 	struct wm_txqueue *sc_txq;
    385 
    386 	int sc_nrxqueues;
    387 	struct wm_rxqueue *sc_rxq;
    388 
    389 #ifdef WM_EVENT_COUNTERS
    390 	/* Event counters. */
    391 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
    392 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
    393 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
    394 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
    395 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
    396 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
    397 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    398 
    399 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
    400 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
    401 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
    402 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
    403 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
    404 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
    405 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
    406 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
    407 
    408 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    409 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
    410 
    411 	struct evcnt sc_ev_tu;		/* Tx underrun */
    412 
    413 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    414 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    415 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    416 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    417 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    418 #endif /* WM_EVENT_COUNTERS */
    419 
    420 	/* This variable is used only on the 82547. */
    421 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    422 
    423 	uint32_t sc_ctrl;		/* prototype CTRL register */
    424 #if 0
    425 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    426 #endif
    427 	uint32_t sc_icr;		/* prototype interrupt bits */
    428 	uint32_t sc_itr;		/* prototype intr throttling reg */
    429 	uint32_t sc_tctl;		/* prototype TCTL register */
    430 	uint32_t sc_rctl;		/* prototype RCTL register */
    431 	uint32_t sc_txcw;		/* prototype TXCW register */
    432 	uint32_t sc_tipg;		/* prototype TIPG register */
    433 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    434 	uint32_t sc_pba;		/* prototype PBA register */
    435 
    436 	int sc_tbi_linkup;		/* TBI link status */
    437 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    438 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    439 
    440 	int sc_mchash_type;		/* multicast filter offset */
    441 
    442 	krndsource_t rnd_source;	/* random source */
    443 
    444 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    445 };
    446 
    447 #define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
    448 #define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
    449 #define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
    450 #define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
    451 #define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
    452 #define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
    453 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    454 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    455 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
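        /*
         * Usage sketch (illustration only, not driver code): the lock macros
         * fall through when the mutex pointer is NULL (typically a build
         * without WM_MPSAFE), so callers can bracket critical sections
         * unconditionally:
         *
         *	WM_TX_LOCK(txq);
         *	if (!sc->sc_stopping)
         *		wm_start_locked(ifp);
         *	WM_TX_UNLOCK(txq);
         */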
    456 
    457 #ifdef WM_MPSAFE
    458 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    459 #else
    460 #define CALLOUT_FLAGS	0
    461 #endif
    462 
    463 #define	WM_RXCHAIN_RESET(rxq)						\
    464 do {									\
    465 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    466 	*(rxq)->rxq_tailp = NULL;					\
    467 	(rxq)->rxq_len = 0;						\
    468 } while (/*CONSTCOND*/0)
    469 
    470 #define	WM_RXCHAIN_LINK(rxq, m)						\
    471 do {									\
    472 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    473 	(rxq)->rxq_tailp = &(m)->m_next;				\
    474 } while (/*CONSTCOND*/0)
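        /*
         * rxq_tailp always points at the pointer to fill next (rxq_head at
         * first, then the previous mbuf's m_next), so WM_RXCHAIN_LINK()
         * appends to the chain in O(1) without walking it, and
         * WM_RXCHAIN_RESET() re-arms the tail pointer at the head.
         */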
    475 
    476 #ifdef WM_EVENT_COUNTERS
    477 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    478 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    479 #else
    480 #define	WM_EVCNT_INCR(ev)	/* nothing */
    481 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    482 #endif
    483 
    484 #define	CSR_READ(sc, reg)						\
    485 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    486 #define	CSR_WRITE(sc, reg, val)						\
    487 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    488 #define	CSR_WRITE_FLUSH(sc)						\
    489 	(void) CSR_READ((sc), WMREG_STATUS)
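        /*
         * Usage sketch (illustration only, not driver code):
         * CSR_WRITE_FLUSH() reads STATUS to force posted PCI writes out to
         * the device before a timed wait, e.g.
         *
         *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
         *	CSR_WRITE_FLUSH(sc);
         *	delay(100);
         */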
    490 
    491 #define ICH8_FLASH_READ32(sc, reg) \
    492 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    493 #define ICH8_FLASH_WRITE32(sc, reg, data) \
    494 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    495 
    496 #define ICH8_FLASH_READ16(sc, reg) \
    497 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    498 #define ICH8_FLASH_WRITE16(sc, reg, data) \
    499 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    500 
    501 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
    502 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    503 
    504 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    505 #define	WM_CDTXADDR_HI(txq, x)						\
    506 	(sizeof(bus_addr_t) == 8 ?					\
    507 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    508 
    509 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    510 #define	WM_CDRXADDR_HI(rxq, x)						\
    511 	(sizeof(bus_addr_t) == 8 ?					\
    512 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
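        /*
         * Usage sketch (illustration only, not driver code): descriptor base
         * addresses are programmed as two 32-bit halves, and the _HI half is
         * forced to 0 when bus_addr_t is 32 bits wide, e.g.
         *
         *	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
         *	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
         */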
    513 
    514 /*
    515  * Register read/write functions,
    516  * other than CSR_{READ|WRITE}().
    517  */
    518 #if 0
    519 static inline uint32_t wm_io_read(struct wm_softc *, int);
    520 #endif
    521 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    522 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    523 	uint32_t, uint32_t);
    524 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    525 
    526 /*
    527  * Descriptor sync/init functions.
    528  */
    529 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    530 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    531 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    532 
    533 /*
    534  * Device driver interface functions and commonly used functions.
    535  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    536  */
    537 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    538 static int	wm_match(device_t, cfdata_t, void *);
    539 static void	wm_attach(device_t, device_t, void *);
    540 static int	wm_detach(device_t, int);
    541 static bool	wm_suspend(device_t, const pmf_qual_t *);
    542 static bool	wm_resume(device_t, const pmf_qual_t *);
    543 static void	wm_watchdog(struct ifnet *);
    544 static void	wm_tick(void *);
    545 static int	wm_ifflags_cb(struct ethercom *);
    546 static int	wm_ioctl(struct ifnet *, u_long, void *);
    547 /* MAC address related */
    548 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    549 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    550 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    551 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    552 static void	wm_set_filter(struct wm_softc *);
    553 /* Reset and init related */
    554 static void	wm_set_vlan(struct wm_softc *);
    555 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    556 static void	wm_get_auto_rd_done(struct wm_softc *);
    557 static void	wm_lan_init_done(struct wm_softc *);
    558 static void	wm_get_cfg_done(struct wm_softc *);
    559 static void	wm_initialize_hardware_bits(struct wm_softc *);
    560 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    561 static void	wm_reset(struct wm_softc *);
    562 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    563 static void	wm_rxdrain(struct wm_rxqueue *);
    564 static void	wm_rss_getkey(uint8_t *);
    565 static void	wm_init_rss(struct wm_softc *);
    566 static void	wm_adjust_qnum(struct wm_softc *, int);
    567 static int	wm_setup_legacy(struct wm_softc *);
    568 static int	wm_setup_msix(struct wm_softc *);
    569 static int	wm_init(struct ifnet *);
    570 static int	wm_init_locked(struct ifnet *);
    571 static void	wm_stop(struct ifnet *, int);
    572 static void	wm_stop_locked(struct ifnet *, int);
    573 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    574 static void	wm_82547_txfifo_stall(void *);
    575 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    576 /* DMA related */
    577 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    578 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    579 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    580 static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
    581 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    582 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    583 static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
    584 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    585 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    586 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    587 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    588 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    589 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    590 static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
    591 static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
    592 static int	wm_alloc_txrx_queues(struct wm_softc *);
    593 static void	wm_free_txrx_queues(struct wm_softc *);
    594 static int	wm_init_txrx_queues(struct wm_softc *);
    595 /* Start */
    596 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    597     uint32_t *, uint8_t *);
    598 static void	wm_start(struct ifnet *);
    599 static void	wm_start_locked(struct ifnet *);
    600 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    601     uint32_t *, uint32_t *, bool *);
    602 static void	wm_nq_start(struct ifnet *);
    603 static void	wm_nq_start_locked(struct ifnet *);
    604 /* Interrupt */
    605 static int	wm_txeof(struct wm_softc *);
    606 static void	wm_rxeof(struct wm_rxqueue *);
    607 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    608 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    609 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    610 static void	wm_linkintr(struct wm_softc *, uint32_t);
    611 static int	wm_intr_legacy(void *);
    612 static int	wm_txintr_msix(void *);
    613 static int	wm_rxintr_msix(void *);
    614 static int	wm_linkintr_msix(void *);
    615 
    616 /*
    617  * Media related.
    618  * GMII, SGMII, TBI, SERDES and SFP.
    619  */
    620 /* Common */
    621 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    622 /* GMII related */
    623 static void	wm_gmii_reset(struct wm_softc *);
    624 static int	wm_get_phy_id_82575(struct wm_softc *);
    625 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    626 static int	wm_gmii_mediachange(struct ifnet *);
    627 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    628 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    629 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    630 static int	wm_gmii_i82543_readreg(device_t, int, int);
    631 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    632 static int	wm_gmii_i82544_readreg(device_t, int, int);
    633 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    634 static int	wm_gmii_i80003_readreg(device_t, int, int);
    635 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    636 static int	wm_gmii_bm_readreg(device_t, int, int);
    637 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    638 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    639 static int	wm_gmii_hv_readreg(device_t, int, int);
    640 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    641 static int	wm_gmii_82580_readreg(device_t, int, int);
    642 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    643 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    644 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    645 static void	wm_gmii_statchg(struct ifnet *);
    646 static int	wm_kmrn_readreg(struct wm_softc *, int);
    647 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    648 /* SGMII */
    649 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    650 static int	wm_sgmii_readreg(device_t, int, int);
    651 static void	wm_sgmii_writereg(device_t, int, int, int);
    652 /* TBI related */
    653 static void	wm_tbi_mediainit(struct wm_softc *);
    654 static int	wm_tbi_mediachange(struct ifnet *);
    655 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    656 static int	wm_check_for_link(struct wm_softc *);
    657 static void	wm_tbi_tick(struct wm_softc *);
    658 /* SERDES related */
    659 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    660 static int	wm_serdes_mediachange(struct ifnet *);
    661 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    662 static void	wm_serdes_tick(struct wm_softc *);
    663 /* SFP related */
    664 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    665 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    666 
    667 /*
    668  * NVM related.
    669  * Microwire, SPI (w/wo EERD) and Flash.
    670  */
    671 /* Misc functions */
    672 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    673 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    674 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    675 /* Microwire */
    676 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    677 /* SPI */
    678 static int	wm_nvm_ready_spi(struct wm_softc *);
    679 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    680 /* Used with EERD */
    681 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    682 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    683 /* Flash */
    684 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    685     unsigned int *);
    686 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    687 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    688 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    689 	uint16_t *);
    690 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    691 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    692 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    693 /* iNVM */
    694 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    695 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    696 /* Lock, detecting NVM type, validate checksum and read */
    697 static int	wm_nvm_acquire(struct wm_softc *);
    698 static void	wm_nvm_release(struct wm_softc *);
    699 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    700 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    701 static int	wm_nvm_validate_checksum(struct wm_softc *);
    702 static void	wm_nvm_version_invm(struct wm_softc *);
    703 static void	wm_nvm_version(struct wm_softc *);
    704 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    705 
    706 /*
    707  * Hardware semaphores.
    708  * Very complex...
    709  */
    710 static int	wm_get_swsm_semaphore(struct wm_softc *);
    711 static void	wm_put_swsm_semaphore(struct wm_softc *);
    712 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    713 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    714 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    715 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    716 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    717 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    718 
    719 /*
    720  * Management mode and power management related subroutines.
    721  * BMC, AMT, suspend/resume and EEE.
    722  */
    723 #ifdef WM_WOL
    724 static int	wm_check_mng_mode(struct wm_softc *);
    725 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    726 static int	wm_check_mng_mode_82574(struct wm_softc *);
    727 static int	wm_check_mng_mode_generic(struct wm_softc *);
    728 #endif
    729 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    730 static int	wm_check_reset_block(struct wm_softc *);
    731 static void	wm_get_hw_control(struct wm_softc *);
    732 static void	wm_release_hw_control(struct wm_softc *);
    733 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
    734 static void	wm_smbustopci(struct wm_softc *);
    735 static void	wm_init_manageability(struct wm_softc *);
    736 static void	wm_release_manageability(struct wm_softc *);
    737 static void	wm_get_wakeup(struct wm_softc *);
    738 #ifdef WM_WOL
    739 static void	wm_enable_phy_wakeup(struct wm_softc *);
    740 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    741 static void	wm_enable_wakeup(struct wm_softc *);
    742 #endif
    743 /* LPLU (Low Power Link Up) */
    744 static void	wm_lplu_d0_disable(struct wm_softc *);
    745 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    746 /* EEE */
    747 static void	wm_set_eee_i350(struct wm_softc *);
    748 
    749 /*
    750  * Workarounds (mainly PHY related).
    751  * Basically, PHY workarounds are implemented in the PHY drivers.
    752  */
    753 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    754 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    755 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    756 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    757 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    758 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    759 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    760 static void	wm_reset_init_script_82575(struct wm_softc *);
    761 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    762 static void	wm_pll_workaround_i210(struct wm_softc *);
    763 
    764 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    765     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    766 
    767 /*
    768  * Devices supported by this driver.
    769  */
    770 static const struct wm_product {
    771 	pci_vendor_id_t		wmp_vendor;
    772 	pci_product_id_t	wmp_product;
    773 	const char		*wmp_name;
    774 	wm_chip_type		wmp_type;
    775 	uint32_t		wmp_flags;
    776 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    777 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    778 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    779 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    780 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    781 } wm_products[] = {
    782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    783 	  "Intel i82542 1000BASE-X Ethernet",
    784 	  WM_T_82542_2_1,	WMP_F_FIBER },
    785 
    786 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    787 	  "Intel i82543GC 1000BASE-X Ethernet",
    788 	  WM_T_82543,		WMP_F_FIBER },
    789 
    790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    791 	  "Intel i82543GC 1000BASE-T Ethernet",
    792 	  WM_T_82543,		WMP_F_COPPER },
    793 
    794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    795 	  "Intel i82544EI 1000BASE-T Ethernet",
    796 	  WM_T_82544,		WMP_F_COPPER },
    797 
    798 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    799 	  "Intel i82544EI 1000BASE-X Ethernet",
    800 	  WM_T_82544,		WMP_F_FIBER },
    801 
    802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    803 	  "Intel i82544GC 1000BASE-T Ethernet",
    804 	  WM_T_82544,		WMP_F_COPPER },
    805 
    806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    807 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    808 	  WM_T_82544,		WMP_F_COPPER },
    809 
    810 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    811 	  "Intel i82540EM 1000BASE-T Ethernet",
    812 	  WM_T_82540,		WMP_F_COPPER },
    813 
    814 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    815 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    816 	  WM_T_82540,		WMP_F_COPPER },
    817 
    818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    819 	  "Intel i82540EP 1000BASE-T Ethernet",
    820 	  WM_T_82540,		WMP_F_COPPER },
    821 
    822 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    823 	  "Intel i82540EP 1000BASE-T Ethernet",
    824 	  WM_T_82540,		WMP_F_COPPER },
    825 
    826 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    827 	  "Intel i82540EP 1000BASE-T Ethernet",
    828 	  WM_T_82540,		WMP_F_COPPER },
    829 
    830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    831 	  "Intel i82545EM 1000BASE-T Ethernet",
    832 	  WM_T_82545,		WMP_F_COPPER },
    833 
    834 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    835 	  "Intel i82545GM 1000BASE-T Ethernet",
    836 	  WM_T_82545_3,		WMP_F_COPPER },
    837 
    838 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    839 	  "Intel i82545GM 1000BASE-X Ethernet",
    840 	  WM_T_82545_3,		WMP_F_FIBER },
    841 
    842 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    843 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    844 	  WM_T_82545_3,		WMP_F_SERDES },
    845 
    846 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    847 	  "Intel i82546EB 1000BASE-T Ethernet",
    848 	  WM_T_82546,		WMP_F_COPPER },
    849 
    850 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    851 	  "Intel i82546EB 1000BASE-T Ethernet",
    852 	  WM_T_82546,		WMP_F_COPPER },
    853 
    854 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    855 	  "Intel i82545EM 1000BASE-X Ethernet",
    856 	  WM_T_82545,		WMP_F_FIBER },
    857 
    858 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    859 	  "Intel i82546EB 1000BASE-X Ethernet",
    860 	  WM_T_82546,		WMP_F_FIBER },
    861 
    862 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    863 	  "Intel i82546GB 1000BASE-T Ethernet",
    864 	  WM_T_82546_3,		WMP_F_COPPER },
    865 
    866 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    867 	  "Intel i82546GB 1000BASE-X Ethernet",
    868 	  WM_T_82546_3,		WMP_F_FIBER },
    869 
    870 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    871 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    872 	  WM_T_82546_3,		WMP_F_SERDES },
    873 
    874 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    875 	  "i82546GB quad-port Gigabit Ethernet",
    876 	  WM_T_82546_3,		WMP_F_COPPER },
    877 
    878 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    879 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    880 	  WM_T_82546_3,		WMP_F_COPPER },
    881 
    882 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    883 	  "Intel PRO/1000MT (82546GB)",
    884 	  WM_T_82546_3,		WMP_F_COPPER },
    885 
    886 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    887 	  "Intel i82541EI 1000BASE-T Ethernet",
    888 	  WM_T_82541,		WMP_F_COPPER },
    889 
    890 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    891 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    892 	  WM_T_82541,		WMP_F_COPPER },
    893 
    894 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    895 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    896 	  WM_T_82541,		WMP_F_COPPER },
    897 
    898 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    899 	  "Intel i82541ER 1000BASE-T Ethernet",
    900 	  WM_T_82541_2,		WMP_F_COPPER },
    901 
    902 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    903 	  "Intel i82541GI 1000BASE-T Ethernet",
    904 	  WM_T_82541_2,		WMP_F_COPPER },
    905 
    906 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    907 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    908 	  WM_T_82541_2,		WMP_F_COPPER },
    909 
    910 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    911 	  "Intel i82541PI 1000BASE-T Ethernet",
    912 	  WM_T_82541_2,		WMP_F_COPPER },
    913 
    914 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    915 	  "Intel i82547EI 1000BASE-T Ethernet",
    916 	  WM_T_82547,		WMP_F_COPPER },
    917 
    918 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    919 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    920 	  WM_T_82547,		WMP_F_COPPER },
    921 
    922 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    923 	  "Intel i82547GI 1000BASE-T Ethernet",
    924 	  WM_T_82547_2,		WMP_F_COPPER },
    925 
    926 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    927 	  "Intel PRO/1000 PT (82571EB)",
    928 	  WM_T_82571,		WMP_F_COPPER },
    929 
    930 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    931 	  "Intel PRO/1000 PF (82571EB)",
    932 	  WM_T_82571,		WMP_F_FIBER },
    933 
    934 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    935 	  "Intel PRO/1000 PB (82571EB)",
    936 	  WM_T_82571,		WMP_F_SERDES },
    937 
    938 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    939 	  "Intel PRO/1000 QT (82571EB)",
    940 	  WM_T_82571,		WMP_F_COPPER },
    941 
    942 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    943 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    944 	  WM_T_82571,		WMP_F_COPPER, },
    945 
    946 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
    947 	  "Intel Gigabit PT Quad Port Server ExpressModule",
    948 	  WM_T_82571,		WMP_F_COPPER, },
    949 
    950 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
    951 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
    952 	  WM_T_82571,		WMP_F_SERDES, },
    953 
    954 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
    955 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
    956 	  WM_T_82571,		WMP_F_SERDES, },
    957 
    958 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
    959 	  "Intel 82571EB Quad 1000baseX Ethernet",
    960 	  WM_T_82571,		WMP_F_FIBER, },
    961 
    962 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    963 	  "Intel i82572EI 1000baseT Ethernet",
    964 	  WM_T_82572,		WMP_F_COPPER },
    965 
    966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    967 	  "Intel i82572EI 1000baseX Ethernet",
    968 	  WM_T_82572,		WMP_F_FIBER },
    969 
    970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    971 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    972 	  WM_T_82572,		WMP_F_SERDES },
    973 
    974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    975 	  "Intel i82572EI 1000baseT Ethernet",
    976 	  WM_T_82572,		WMP_F_COPPER },
    977 
    978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    979 	  "Intel i82573E",
    980 	  WM_T_82573,		WMP_F_COPPER },
    981 
    982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    983 	  "Intel i82573E IAMT",
    984 	  WM_T_82573,		WMP_F_COPPER },
    985 
    986 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
    987 	  "Intel i82573L Gigabit Ethernet",
    988 	  WM_T_82573,		WMP_F_COPPER },
    989 
    990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
    991 	  "Intel i82574L",
    992 	  WM_T_82574,		WMP_F_COPPER },
    993 
    994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
    995 	  "Intel i82574L",
    996 	  WM_T_82574,		WMP_F_COPPER },
    997 
    998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
    999 	  "Intel i82583V",
   1000 	  WM_T_82583,		WMP_F_COPPER },
   1001 
   1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1003 	  "i80003 dual 1000baseT Ethernet",
   1004 	  WM_T_80003,		WMP_F_COPPER },
   1005 
   1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1007 	  "i80003 dual 1000baseX Ethernet",
   1008 	  WM_T_80003,		WMP_F_COPPER },
   1009 
   1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1011 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1012 	  WM_T_80003,		WMP_F_SERDES },
   1013 
   1014 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1015 	  "Intel i80003 1000baseT Ethernet",
   1016 	  WM_T_80003,		WMP_F_COPPER },
   1017 
   1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1019 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1020 	  WM_T_80003,		WMP_F_SERDES },
   1021 
   1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1023 	  "Intel i82801H (M_AMT) LAN Controller",
   1024 	  WM_T_ICH8,		WMP_F_COPPER },
   1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1026 	  "Intel i82801H (AMT) LAN Controller",
   1027 	  WM_T_ICH8,		WMP_F_COPPER },
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1029 	  "Intel i82801H LAN Controller",
   1030 	  WM_T_ICH8,		WMP_F_COPPER },
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1032 	  "Intel i82801H (IFE) LAN Controller",
   1033 	  WM_T_ICH8,		WMP_F_COPPER },
   1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1035 	  "Intel i82801H (M) LAN Controller",
   1036 	  WM_T_ICH8,		WMP_F_COPPER },
   1037 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1038 	  "Intel i82801H IFE (GT) LAN Controller",
   1039 	  WM_T_ICH8,		WMP_F_COPPER },
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1041 	  "Intel i82801H IFE (G) LAN Controller",
   1042 	  WM_T_ICH8,		WMP_F_COPPER },
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1044 	  "82801I (AMT) LAN Controller",
   1045 	  WM_T_ICH9,		WMP_F_COPPER },
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1047 	  "82801I LAN Controller",
   1048 	  WM_T_ICH9,		WMP_F_COPPER },
   1049 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1050 	  "82801I (G) LAN Controller",
   1051 	  WM_T_ICH9,		WMP_F_COPPER },
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1053 	  "82801I (GT) LAN Controller",
   1054 	  WM_T_ICH9,		WMP_F_COPPER },
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1056 	  "82801I (C) LAN Controller",
   1057 	  WM_T_ICH9,		WMP_F_COPPER },
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1059 	  "82801I mobile LAN Controller",
   1060 	  WM_T_ICH9,		WMP_F_COPPER },
   1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1062 	  "82801I mobile (V) LAN Controller",
   1063 	  WM_T_ICH9,		WMP_F_COPPER },
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1065 	  "82801I mobile (AMT) LAN Controller",
   1066 	  WM_T_ICH9,		WMP_F_COPPER },
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1068 	  "82567LM-4 LAN Controller",
   1069 	  WM_T_ICH9,		WMP_F_COPPER },
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1071 	  "82567V-3 LAN Controller",
   1072 	  WM_T_ICH9,		WMP_F_COPPER },
   1073 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1074 	  "82567LM-2 LAN Controller",
   1075 	  WM_T_ICH10,		WMP_F_COPPER },
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1077 	  "82567LF-2 LAN Controller",
   1078 	  WM_T_ICH10,		WMP_F_COPPER },
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1080 	  "82567LM-3 LAN Controller",
   1081 	  WM_T_ICH10,		WMP_F_COPPER },
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1083 	  "82567LF-3 LAN Controller",
   1084 	  WM_T_ICH10,		WMP_F_COPPER },
   1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1086 	  "82567V-2 LAN Controller",
   1087 	  WM_T_ICH10,		WMP_F_COPPER },
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1089 	  "82567V-3? LAN Controller",
   1090 	  WM_T_ICH10,		WMP_F_COPPER },
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1092 	  "HANKSVILLE LAN Controller",
   1093 	  WM_T_ICH10,		WMP_F_COPPER },
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1095 	  "PCH LAN (82577LM) Controller",
   1096 	  WM_T_PCH,		WMP_F_COPPER },
   1097 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1098 	  "PCH LAN (82577LC) Controller",
   1099 	  WM_T_PCH,		WMP_F_COPPER },
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1101 	  "PCH LAN (82578DM) Controller",
   1102 	  WM_T_PCH,		WMP_F_COPPER },
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1104 	  "PCH LAN (82578DC) Controller",
   1105 	  WM_T_PCH,		WMP_F_COPPER },
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1107 	  "PCH2 LAN (82579LM) Controller",
   1108 	  WM_T_PCH2,		WMP_F_COPPER },
   1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1110 	  "PCH2 LAN (82579V) Controller",
   1111 	  WM_T_PCH2,		WMP_F_COPPER },
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1113 	  "82575EB dual-1000baseT Ethernet",
   1114 	  WM_T_82575,		WMP_F_COPPER },
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1116 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1117 	  WM_T_82575,		WMP_F_SERDES },
   1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1119 	  "82575GB quad-1000baseT Ethernet",
   1120 	  WM_T_82575,		WMP_F_COPPER },
   1121 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1122 	  "82575GB quad-1000baseT Ethernet (PM)",
   1123 	  WM_T_82575,		WMP_F_COPPER },
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1125 	  "82576 1000BaseT Ethernet",
   1126 	  WM_T_82576,		WMP_F_COPPER },
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1128 	  "82576 1000BaseX Ethernet",
   1129 	  WM_T_82576,		WMP_F_FIBER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1132 	  "82576 gigabit Ethernet (SERDES)",
   1133 	  WM_T_82576,		WMP_F_SERDES },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1136 	  "82576 quad-1000BaseT Ethernet",
   1137 	  WM_T_82576,		WMP_F_COPPER },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1140 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1141 	  WM_T_82576,		WMP_F_COPPER },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1144 	  "82576 gigabit Ethernet",
   1145 	  WM_T_82576,		WMP_F_COPPER },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1148 	  "82576 gigabit Ethernet (SERDES)",
   1149 	  WM_T_82576,		WMP_F_SERDES },
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1151 	  "82576 quad-gigabit Ethernet (SERDES)",
   1152 	  WM_T_82576,		WMP_F_SERDES },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1155 	  "82580 1000BaseT Ethernet",
   1156 	  WM_T_82580,		WMP_F_COPPER },
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1158 	  "82580 1000BaseX Ethernet",
   1159 	  WM_T_82580,		WMP_F_FIBER },
   1160 
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1162 	  "82580 1000BaseT Ethernet (SERDES)",
   1163 	  WM_T_82580,		WMP_F_SERDES },
   1164 
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1166 	  "82580 gigabit Ethernet (SGMII)",
   1167 	  WM_T_82580,		WMP_F_COPPER },
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1169 	  "82580 dual-1000BaseT Ethernet",
   1170 	  WM_T_82580,		WMP_F_COPPER },
   1171 
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1173 	  "82580 quad-1000BaseX Ethernet",
   1174 	  WM_T_82580,		WMP_F_FIBER },
   1175 
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1177 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1178 	  WM_T_82580,		WMP_F_COPPER },
   1179 
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1181 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1182 	  WM_T_82580,		WMP_F_SERDES },
   1183 
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1185 	  "DH89XXCC 1000BASE-KX Ethernet",
   1186 	  WM_T_82580,		WMP_F_SERDES },
   1187 
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1189 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1190 	  WM_T_82580,		WMP_F_SERDES },
   1191 
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1193 	  "I350 Gigabit Network Connection",
   1194 	  WM_T_I350,		WMP_F_COPPER },
   1195 
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1197 	  "I350 Gigabit Fiber Network Connection",
   1198 	  WM_T_I350,		WMP_F_FIBER },
   1199 
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1201 	  "I350 Gigabit Backplane Connection",
   1202 	  WM_T_I350,		WMP_F_SERDES },
   1203 
   1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1205 	  "I350 Quad Port Gigabit Ethernet",
   1206 	  WM_T_I350,		WMP_F_SERDES },
   1207 
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1209 	  "I350 Gigabit Connection",
   1210 	  WM_T_I350,		WMP_F_COPPER },
   1211 
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1213 	  "I354 Gigabit Ethernet (KX)",
   1214 	  WM_T_I354,		WMP_F_SERDES },
   1215 
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1217 	  "I354 Gigabit Ethernet (SGMII)",
   1218 	  WM_T_I354,		WMP_F_COPPER },
   1219 
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1221 	  "I354 Gigabit Ethernet (2.5G)",
   1222 	  WM_T_I354,		WMP_F_COPPER },
   1223 
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1225 	  "I210-T1 Ethernet Server Adapter",
   1226 	  WM_T_I210,		WMP_F_COPPER },
   1227 
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1229 	  "I210 Ethernet (Copper OEM)",
   1230 	  WM_T_I210,		WMP_F_COPPER },
   1231 
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1233 	  "I210 Ethernet (Copper IT)",
   1234 	  WM_T_I210,		WMP_F_COPPER },
   1235 
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1237 	  "I210 Ethernet (FLASH less)",
   1238 	  WM_T_I210,		WMP_F_COPPER },
   1239 
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1241 	  "I210 Gigabit Ethernet (Fiber)",
   1242 	  WM_T_I210,		WMP_F_FIBER },
   1243 
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1245 	  "I210 Gigabit Ethernet (SERDES)",
   1246 	  WM_T_I210,		WMP_F_SERDES },
   1247 
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1249 	  "I210 Gigabit Ethernet (FLASH less)",
   1250 	  WM_T_I210,		WMP_F_SERDES },
   1251 
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1253 	  "I210 Gigabit Ethernet (SGMII)",
   1254 	  WM_T_I210,		WMP_F_COPPER },
   1255 
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1257 	  "I211 Ethernet (COPPER)",
   1258 	  WM_T_I211,		WMP_F_COPPER },
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1260 	  "I217 V Ethernet Connection",
   1261 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1263 	  "I217 LM Ethernet Connection",
   1264 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1266 	  "I218 V Ethernet Connection",
   1267 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1269 	  "I218 V Ethernet Connection",
   1270 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1272 	  "I218 V Ethernet Connection",
   1273 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1275 	  "I218 LM Ethernet Connection",
   1276 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1278 	  "I218 LM Ethernet Connection",
   1279 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1281 	  "I218 LM Ethernet Connection",
   1282 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1283 	{ 0,			0,
   1284 	  NULL,
   1285 	  0,			0 },
   1286 };
   1287 
   1288 #ifdef WM_EVENT_COUNTERS
   1289 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1290 #endif /* WM_EVENT_COUNTERS */
   1291 
   1292 
   1293 /*
   1294  * Register read/write functions.
   1295  * Other than CSR_{READ|WRITE}().
   1296  */
   1297 
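/*
 * The I/O BAR exposes an indirect two-register window: the register
 * offset is written at byte offset 0, and the data is then read or
 * written at byte offset 4, as the helpers below do.
 */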
   1298 #if 0 /* Not currently used */
   1299 static inline uint32_t
   1300 wm_io_read(struct wm_softc *sc, int reg)
   1301 {
   1302 
   1303 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1304 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1305 }
   1306 #endif
   1307 
   1308 static inline void
   1309 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1310 {
   1311 
   1312 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1313 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1314 }
   1315 
   1316 static inline void
   1317 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1318     uint32_t data)
   1319 {
   1320 	uint32_t regval;
   1321 	int i;
   1322 
   1323 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1324 
   1325 	CSR_WRITE(sc, reg, regval);
   1326 
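	/*
	 * Poll for the READY bit, which the chip sets once it has
	 * latched the 8-bit write; give up after SCTL_CTL_POLL_TIMEOUT
	 * iterations of a 5us delay.
	 */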
   1327 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1328 		delay(5);
   1329 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1330 			break;
   1331 	}
   1332 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1333 		aprint_error("%s: WARNING:"
   1334 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1335 		    device_xname(sc->sc_dev), reg);
   1336 	}
   1337 }
   1338 
   1339 static inline void
   1340 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1341 {
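	/*
	 * Descriptor addresses are stored as two little-endian 32-bit
	 * halves; on platforms with a 32-bit bus_addr_t the high half
	 * is simply zero.
	 */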
   1342 	wa->wa_low = htole32(v & 0xffffffffU);
   1343 	if (sizeof(bus_addr_t) == 8)
   1344 		wa->wa_high = htole32((uint64_t) v >> 32);
   1345 	else
   1346 		wa->wa_high = 0;
   1347 }
   1348 
   1349 /*
   1350  * Descriptor sync/init functions.
   1351  */
   1352 static inline void
   1353 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1354 {
   1355 	struct wm_softc *sc = txq->txq_sc;
   1356 
   1357 	/* If it will wrap around, sync to the end of the ring. */
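	/*
	 * For example, on a 256-descriptor ring, start=250 with num=10
	 * syncs descriptors 250-255 here and then 0-3 below.
	 */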
   1358 	if ((start + num) > WM_NTXDESC(txq)) {
   1359 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1360 		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
   1361 		    (WM_NTXDESC(txq) - start), ops);
   1362 		num -= (WM_NTXDESC(txq) - start);
   1363 		start = 0;
   1364 	}
   1365 
   1366 	/* Now sync whatever is left. */
   1367 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1368 	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
   1369 }
   1370 
   1371 static inline void
   1372 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1373 {
   1374 	struct wm_softc *sc = rxq->rxq_sc;
   1375 
   1376 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1377 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1378 }
   1379 
   1380 static inline void
   1381 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1382 {
   1383 	struct wm_softc *sc = rxq->rxq_sc;
   1384 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1385 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1386 	struct mbuf *m = rxs->rxs_mbuf;
   1387 
   1388 	/*
   1389 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1390 	 * so that the payload after the Ethernet header is aligned
   1391 	 * to a 4-byte boundary.
    1392 	 *
   1393 	 * XXX BRAINDAMAGE ALERT!
   1394 	 * The stupid chip uses the same size for every buffer, which
   1395 	 * is set in the Receive Control register.  We are using the 2K
   1396 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1397 	 * reason, we can't "scoot" packets longer than the standard
   1398 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1399 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1400 	 * the upper layer copy the headers.
   1401 	 */
   1402 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1403 
   1404 	wm_set_dma_addr(&rxd->wrx_addr,
   1405 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1406 	rxd->wrx_len = 0;
   1407 	rxd->wrx_cksum = 0;
   1408 	rxd->wrx_status = 0;
   1409 	rxd->wrx_errors = 0;
   1410 	rxd->wrx_special = 0;
   1411 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1412 
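	/* Advance the receive descriptor tail to hand the slot back to the chip. */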
   1413 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1414 }
   1415 
   1416 /*
    1417  * Device driver interface functions and commonly used functions:
   1418  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1419  */
   1420 
   1421 /* Lookup supported device table */
   1422 static const struct wm_product *
   1423 wm_lookup(const struct pci_attach_args *pa)
   1424 {
   1425 	const struct wm_product *wmp;
   1426 
   1427 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1428 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1429 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1430 			return wmp;
   1431 	}
   1432 	return NULL;
   1433 }
   1434 
   1435 /* The match function (ca_match) */
   1436 static int
   1437 wm_match(device_t parent, cfdata_t cf, void *aux)
   1438 {
   1439 	struct pci_attach_args *pa = aux;
   1440 
   1441 	if (wm_lookup(pa) != NULL)
   1442 		return 1;
   1443 
   1444 	return 0;
   1445 }
   1446 
   1447 /* The attach function (ca_attach) */
   1448 static void
   1449 wm_attach(device_t parent, device_t self, void *aux)
   1450 {
   1451 	struct wm_softc *sc = device_private(self);
   1452 	struct pci_attach_args *pa = aux;
   1453 	prop_dictionary_t dict;
   1454 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1455 	pci_chipset_tag_t pc = pa->pa_pc;
   1456 	int counts[PCI_INTR_TYPE_SIZE];
   1457 	pci_intr_type_t max_type;
   1458 	const char *eetype, *xname;
   1459 	bus_space_tag_t memt;
   1460 	bus_space_handle_t memh;
   1461 	bus_size_t memsize;
   1462 	int memh_valid;
   1463 	int i, error;
   1464 	const struct wm_product *wmp;
   1465 	prop_data_t ea;
   1466 	prop_number_t pn;
   1467 	uint8_t enaddr[ETHER_ADDR_LEN];
   1468 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1469 	pcireg_t preg, memtype;
   1470 	uint16_t eeprom_data, apme_mask;
   1471 	bool force_clear_smbi;
   1472 	uint32_t link_mode;
   1473 	uint32_t reg;
   1474 
   1475 	sc->sc_dev = self;
   1476 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1477 	sc->sc_stopping = false;
   1478 
   1479 	wmp = wm_lookup(pa);
   1480 #ifdef DIAGNOSTIC
   1481 	if (wmp == NULL) {
   1482 		printf("\n");
   1483 		panic("wm_attach: impossible");
   1484 	}
   1485 #endif
   1486 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1487 
   1488 	sc->sc_pc = pa->pa_pc;
   1489 	sc->sc_pcitag = pa->pa_tag;
   1490 
   1491 	if (pci_dma64_available(pa))
   1492 		sc->sc_dmat = pa->pa_dmat64;
   1493 	else
   1494 		sc->sc_dmat = pa->pa_dmat;
   1495 
   1496 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1497 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1498 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1499 
   1500 	sc->sc_type = wmp->wmp_type;
   1501 	if (sc->sc_type < WM_T_82543) {
   1502 		if (sc->sc_rev < 2) {
   1503 			aprint_error_dev(sc->sc_dev,
   1504 			    "i82542 must be at least rev. 2\n");
   1505 			return;
   1506 		}
   1507 		if (sc->sc_rev < 3)
   1508 			sc->sc_type = WM_T_82542_2_0;
   1509 	}
   1510 
   1511 	/*
   1512 	 * Disable MSI for Errata:
   1513 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1514 	 *
   1515 	 *  82544: Errata 25
   1516 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1517 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1518 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1519 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1520 	 *
   1521 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1522 	 *
   1523 	 *  82571 & 82572: Errata 63
   1524 	 */
   1525 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1526 	    || (sc->sc_type == WM_T_82572))
   1527 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1528 
   1529 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1530 	    || (sc->sc_type == WM_T_82580)
   1531 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1532 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1533 		sc->sc_flags |= WM_F_NEWQUEUE;
   1534 
   1535 	/* Set device properties (mactype) */
   1536 	dict = device_properties(sc->sc_dev);
   1537 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1538 
   1539 	/*
    1540 	 * Map the device.  All devices support memory-mapped access,
    1541 	 * which is required for normal operation.
   1542 	 */
   1543 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1544 	switch (memtype) {
   1545 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1546 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1547 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1548 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1549 		break;
   1550 	default:
   1551 		memh_valid = 0;
   1552 		break;
   1553 	}
   1554 
   1555 	if (memh_valid) {
   1556 		sc->sc_st = memt;
   1557 		sc->sc_sh = memh;
   1558 		sc->sc_ss = memsize;
   1559 	} else {
   1560 		aprint_error_dev(sc->sc_dev,
   1561 		    "unable to map device registers\n");
   1562 		return;
   1563 	}
   1564 
   1565 	/*
   1566 	 * In addition, i82544 and later support I/O mapped indirect
   1567 	 * register access.  It is not desirable (nor supported in
   1568 	 * this driver) to use it for normal operation, though it is
   1569 	 * required to work around bugs in some chip versions.
   1570 	 */
   1571 	if (sc->sc_type >= WM_T_82544) {
   1572 		/* First we have to find the I/O BAR. */
   1573 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1574 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1575 			if (memtype == PCI_MAPREG_TYPE_IO)
   1576 				break;
   1577 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1578 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1579 				i += 4;	/* skip high bits, too */
   1580 		}
   1581 		if (i < PCI_MAPREG_END) {
   1582 			/*
    1583 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1584 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    1585 			 * that's not a problem, because those newer chips
    1586 			 * don't have this bug.
    1587 			 *
    1588 			 * The i8254x apparently doesn't respond when the
    1589 			 * I/O BAR is 0, which looks as if it hasn't
    1590 			 * been configured.
   1591 			 */
   1592 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1593 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1594 				aprint_error_dev(sc->sc_dev,
   1595 				    "WARNING: I/O BAR at zero.\n");
   1596 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1597 					0, &sc->sc_iot, &sc->sc_ioh,
   1598 					NULL, &sc->sc_ios) == 0) {
   1599 				sc->sc_flags |= WM_F_IOH_VALID;
   1600 			} else {
   1601 				aprint_error_dev(sc->sc_dev,
   1602 				    "WARNING: unable to map I/O space\n");
   1603 			}
   1604 		}
   1605 
   1606 	}
   1607 
   1608 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1609 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1610 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1611 	if (sc->sc_type < WM_T_82542_2_1)
   1612 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1613 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1614 
   1615 	/* power up chip */
   1616 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1617 	    NULL)) && error != EOPNOTSUPP) {
   1618 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1619 		return;
   1620 	}
   1621 
   1622 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1623 
   1624 	/* Allocation settings */
   1625 	max_type = PCI_INTR_TYPE_MSIX;
   1626 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   1627 	counts[PCI_INTR_TYPE_MSI] = 1;
   1628 	counts[PCI_INTR_TYPE_INTX] = 1;
   1629 
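	/*
	 * Try MSI-X first.  If its setup fails, release the vectors and
	 * retry with MSI, then with INTx, lowering max_type each time so
	 * pci_intr_alloc() can't hand back the type that just failed.
	 */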
   1630 alloc_retry:
   1631 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1632 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1633 		return;
   1634 	}
   1635 
   1636 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1637 		error = wm_setup_msix(sc);
   1638 		if (error) {
   1639 			pci_intr_release(pc, sc->sc_intrs,
   1640 			    counts[PCI_INTR_TYPE_MSIX]);
   1641 
   1642 			/* Setup for MSI: Disable MSI-X */
   1643 			max_type = PCI_INTR_TYPE_MSI;
   1644 			counts[PCI_INTR_TYPE_MSI] = 1;
   1645 			counts[PCI_INTR_TYPE_INTX] = 1;
   1646 			goto alloc_retry;
   1647 		}
    1648 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1649 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1650 		error = wm_setup_legacy(sc);
   1651 		if (error) {
   1652 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1653 			    counts[PCI_INTR_TYPE_MSI]);
   1654 
   1655 			/* The next try is for INTx: Disable MSI */
   1656 			max_type = PCI_INTR_TYPE_INTX;
   1657 			counts[PCI_INTR_TYPE_INTX] = 1;
   1658 			goto alloc_retry;
   1659 		}
   1660 	} else {
   1661 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1662 		error = wm_setup_legacy(sc);
   1663 		if (error) {
   1664 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1665 			    counts[PCI_INTR_TYPE_INTX]);
   1666 			return;
   1667 		}
   1668 	}
   1669 
   1670 	/*
   1671 	 * Check the function ID (unit number of the chip).
   1672 	 */
   1673 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1674 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1675 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1676 	    || (sc->sc_type == WM_T_82580)
   1677 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1678 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1679 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1680 	else
   1681 		sc->sc_funcid = 0;
   1682 
   1683 	/*
   1684 	 * Determine a few things about the bus we're connected to.
   1685 	 */
   1686 	if (sc->sc_type < WM_T_82543) {
   1687 		/* We don't really know the bus characteristics here. */
   1688 		sc->sc_bus_speed = 33;
   1689 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1690 		/*
    1691 		 * CSA (Communication Streaming Architecture) is about as fast
    1692 		 * as a 32-bit 66MHz PCI bus.
   1693 		 */
   1694 		sc->sc_flags |= WM_F_CSA;
   1695 		sc->sc_bus_speed = 66;
   1696 		aprint_verbose_dev(sc->sc_dev,
   1697 		    "Communication Streaming Architecture\n");
   1698 		if (sc->sc_type == WM_T_82547) {
   1699 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1700 			callout_setfunc(&sc->sc_txfifo_ch,
   1701 					wm_82547_txfifo_stall, sc);
   1702 			aprint_verbose_dev(sc->sc_dev,
   1703 			    "using 82547 Tx FIFO stall work-around\n");
   1704 		}
   1705 	} else if (sc->sc_type >= WM_T_82571) {
   1706 		sc->sc_flags |= WM_F_PCIE;
   1707 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1708 		    && (sc->sc_type != WM_T_ICH10)
   1709 		    && (sc->sc_type != WM_T_PCH)
   1710 		    && (sc->sc_type != WM_T_PCH2)
   1711 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1712 			/* ICH* and PCH* have no PCIe capability registers */
   1713 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1714 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1715 				NULL) == 0)
   1716 				aprint_error_dev(sc->sc_dev,
   1717 				    "unable to find PCIe capability\n");
   1718 		}
   1719 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1720 	} else {
   1721 		reg = CSR_READ(sc, WMREG_STATUS);
   1722 		if (reg & STATUS_BUS64)
   1723 			sc->sc_flags |= WM_F_BUS64;
   1724 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1725 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1726 
   1727 			sc->sc_flags |= WM_F_PCIX;
   1728 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1729 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1730 				aprint_error_dev(sc->sc_dev,
   1731 				    "unable to find PCIX capability\n");
   1732 			else if (sc->sc_type != WM_T_82545_3 &&
   1733 				 sc->sc_type != WM_T_82546_3) {
   1734 				/*
   1735 				 * Work around a problem caused by the BIOS
   1736 				 * setting the max memory read byte count
   1737 				 * incorrectly.
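				 * Both fields encode the size as
				 * 512 << n bytes, so clamp the command
				 * value to the maximum the device
				 * advertises in its status register.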
   1738 				 */
   1739 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1740 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1741 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1742 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1743 
   1744 				bytecnt =
   1745 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1746 				    PCIX_CMD_BYTECNT_SHIFT;
   1747 				maxb =
   1748 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1749 				    PCIX_STATUS_MAXB_SHIFT;
   1750 				if (bytecnt > maxb) {
   1751 					aprint_verbose_dev(sc->sc_dev,
   1752 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1753 					    512 << bytecnt, 512 << maxb);
   1754 					pcix_cmd = (pcix_cmd &
   1755 					    ~PCIX_CMD_BYTECNT_MASK) |
   1756 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1757 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1758 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1759 					    pcix_cmd);
   1760 				}
   1761 			}
   1762 		}
   1763 		/*
   1764 		 * The quad port adapter is special; it has a PCIX-PCIX
   1765 		 * bridge on the board, and can run the secondary bus at
   1766 		 * a higher speed.
   1767 		 */
   1768 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1769 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1770 								      : 66;
   1771 		} else if (sc->sc_flags & WM_F_PCIX) {
   1772 			switch (reg & STATUS_PCIXSPD_MASK) {
   1773 			case STATUS_PCIXSPD_50_66:
   1774 				sc->sc_bus_speed = 66;
   1775 				break;
   1776 			case STATUS_PCIXSPD_66_100:
   1777 				sc->sc_bus_speed = 100;
   1778 				break;
   1779 			case STATUS_PCIXSPD_100_133:
   1780 				sc->sc_bus_speed = 133;
   1781 				break;
   1782 			default:
   1783 				aprint_error_dev(sc->sc_dev,
   1784 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1785 				    reg & STATUS_PCIXSPD_MASK);
   1786 				sc->sc_bus_speed = 66;
   1787 				break;
   1788 			}
   1789 		} else
   1790 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1791 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1792 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1793 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1794 	}
   1795 
   1796 	/* clear interesting stat counters */
   1797 	CSR_READ(sc, WMREG_COLC);
   1798 	CSR_READ(sc, WMREG_RXERRC);
   1799 
   1800 	/* get PHY control from SMBus to PCIe */
   1801 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1802 	    || (sc->sc_type == WM_T_PCH_LPT))
   1803 		wm_smbustopci(sc);
   1804 
   1805 	/* Reset the chip to a known state. */
   1806 	wm_reset(sc);
   1807 
   1808 	/* Get some information about the EEPROM. */
   1809 	switch (sc->sc_type) {
   1810 	case WM_T_82542_2_0:
   1811 	case WM_T_82542_2_1:
   1812 	case WM_T_82543:
   1813 	case WM_T_82544:
   1814 		/* Microwire */
   1815 		sc->sc_nvm_wordsize = 64;
   1816 		sc->sc_nvm_addrbits = 6;
   1817 		break;
   1818 	case WM_T_82540:
   1819 	case WM_T_82545:
   1820 	case WM_T_82545_3:
   1821 	case WM_T_82546:
   1822 	case WM_T_82546_3:
   1823 		/* Microwire */
   1824 		reg = CSR_READ(sc, WMREG_EECD);
   1825 		if (reg & EECD_EE_SIZE) {
   1826 			sc->sc_nvm_wordsize = 256;
   1827 			sc->sc_nvm_addrbits = 8;
   1828 		} else {
   1829 			sc->sc_nvm_wordsize = 64;
   1830 			sc->sc_nvm_addrbits = 6;
   1831 		}
   1832 		sc->sc_flags |= WM_F_LOCK_EECD;
   1833 		break;
   1834 	case WM_T_82541:
   1835 	case WM_T_82541_2:
   1836 	case WM_T_82547:
   1837 	case WM_T_82547_2:
   1838 		sc->sc_flags |= WM_F_LOCK_EECD;
   1839 		reg = CSR_READ(sc, WMREG_EECD);
   1840 		if (reg & EECD_EE_TYPE) {
   1841 			/* SPI */
   1842 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1843 			wm_nvm_set_addrbits_size_eecd(sc);
   1844 		} else {
   1845 			/* Microwire */
   1846 			if ((reg & EECD_EE_ABITS) != 0) {
   1847 				sc->sc_nvm_wordsize = 256;
   1848 				sc->sc_nvm_addrbits = 8;
   1849 			} else {
   1850 				sc->sc_nvm_wordsize = 64;
   1851 				sc->sc_nvm_addrbits = 6;
   1852 			}
   1853 		}
   1854 		break;
   1855 	case WM_T_82571:
   1856 	case WM_T_82572:
   1857 		/* SPI */
   1858 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1859 		wm_nvm_set_addrbits_size_eecd(sc);
   1860 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1861 		break;
   1862 	case WM_T_82573:
   1863 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1864 		/* FALLTHROUGH */
   1865 	case WM_T_82574:
   1866 	case WM_T_82583:
   1867 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1868 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1869 			sc->sc_nvm_wordsize = 2048;
   1870 		} else {
   1871 			/* SPI */
   1872 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1873 			wm_nvm_set_addrbits_size_eecd(sc);
   1874 		}
   1875 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1876 		break;
   1877 	case WM_T_82575:
   1878 	case WM_T_82576:
   1879 	case WM_T_82580:
   1880 	case WM_T_I350:
   1881 	case WM_T_I354:
   1882 	case WM_T_80003:
   1883 		/* SPI */
   1884 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1885 		wm_nvm_set_addrbits_size_eecd(sc);
   1886 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1887 		    | WM_F_LOCK_SWSM;
   1888 		break;
   1889 	case WM_T_ICH8:
   1890 	case WM_T_ICH9:
   1891 	case WM_T_ICH10:
   1892 	case WM_T_PCH:
   1893 	case WM_T_PCH2:
   1894 	case WM_T_PCH_LPT:
   1895 		/* FLASH */
   1896 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1897 		sc->sc_nvm_wordsize = 2048;
   1898 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1899 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1900 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1901 			aprint_error_dev(sc->sc_dev,
   1902 			    "can't map FLASH registers\n");
   1903 			goto out;
   1904 		}
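		/*
		 * GFPREG appears to hold the flash region base in its low
		 * bits and the limit in bits 16 and up, both in sector
		 * units.  The region holds two NVM banks, so the per-bank
		 * size computed below ends up in 16-bit words.
		 */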
   1905 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1906 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1907 						ICH_FLASH_SECTOR_SIZE;
   1908 		sc->sc_ich8_flash_bank_size =
   1909 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1910 		sc->sc_ich8_flash_bank_size -=
   1911 		    (reg & ICH_GFPREG_BASE_MASK);
   1912 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1913 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1914 		break;
   1915 	case WM_T_I210:
   1916 	case WM_T_I211:
   1917 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1918 			wm_nvm_set_addrbits_size_eecd(sc);
   1919 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1920 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1921 		} else {
   1922 			sc->sc_nvm_wordsize = INVM_SIZE;
   1923 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1924 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1925 		}
   1926 		break;
   1927 	default:
   1928 		break;
   1929 	}
   1930 
   1931 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1932 	switch (sc->sc_type) {
   1933 	case WM_T_82571:
   1934 	case WM_T_82572:
   1935 		reg = CSR_READ(sc, WMREG_SWSM2);
   1936 		if ((reg & SWSM2_LOCK) == 0) {
   1937 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1938 			force_clear_smbi = true;
   1939 		} else
   1940 			force_clear_smbi = false;
   1941 		break;
   1942 	case WM_T_82573:
   1943 	case WM_T_82574:
   1944 	case WM_T_82583:
   1945 		force_clear_smbi = true;
   1946 		break;
   1947 	default:
   1948 		force_clear_smbi = false;
   1949 		break;
   1950 	}
   1951 	if (force_clear_smbi) {
   1952 		reg = CSR_READ(sc, WMREG_SWSM);
   1953 		if ((reg & SWSM_SMBI) != 0)
   1954 			aprint_error_dev(sc->sc_dev,
   1955 			    "Please update the Bootagent\n");
   1956 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1957 	}
   1958 
   1959 	/*
    1960 	 * Defer printing the EEPROM type until after verifying the checksum.
    1961 	 * This allows the EEPROM type to be printed correctly when no
    1962 	 * EEPROM is attached.
   1963 	 */
   1964 	/*
   1965 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1966 	 * this for later, so we can fail future reads from the EEPROM.
   1967 	 */
   1968 	if (wm_nvm_validate_checksum(sc)) {
   1969 		/*
    1970 		 * Check again, because some PCI-e parts fail the first
    1971 		 * check due to the link being in a sleep state.
   1972 		 */
   1973 		if (wm_nvm_validate_checksum(sc))
   1974 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1975 	}
   1976 
   1977 	/* Set device properties (macflags) */
   1978 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1979 
   1980 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1981 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   1982 	else {
   1983 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   1984 		    sc->sc_nvm_wordsize);
   1985 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   1986 			aprint_verbose("iNVM");
   1987 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   1988 			aprint_verbose("FLASH(HW)");
   1989 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   1990 			aprint_verbose("FLASH");
   1991 		else {
   1992 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   1993 				eetype = "SPI";
   1994 			else
   1995 				eetype = "MicroWire";
   1996 			aprint_verbose("(%d address bits) %s EEPROM",
   1997 			    sc->sc_nvm_addrbits, eetype);
   1998 		}
   1999 	}
   2000 	wm_nvm_version(sc);
   2001 	aprint_verbose("\n");
   2002 
   2003 	/* Check for I21[01] PLL workaround */
   2004 	if (sc->sc_type == WM_T_I210)
   2005 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2006 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2007 		/* NVM image release 3.25 has a workaround */
   2008 		if ((sc->sc_nvm_ver_major < 3)
   2009 		    || ((sc->sc_nvm_ver_major == 3)
   2010 			&& (sc->sc_nvm_ver_minor < 25))) {
   2011 			aprint_verbose_dev(sc->sc_dev,
   2012 			    "ROM image version %d.%d is older than 3.25\n",
   2013 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2014 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2015 		}
   2016 	}
   2017 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2018 		wm_pll_workaround_i210(sc);
   2019 
   2020 	wm_get_wakeup(sc);
   2021 	switch (sc->sc_type) {
   2022 	case WM_T_82571:
   2023 	case WM_T_82572:
   2024 	case WM_T_82573:
   2025 	case WM_T_82574:
   2026 	case WM_T_82583:
   2027 	case WM_T_80003:
   2028 	case WM_T_ICH8:
   2029 	case WM_T_ICH9:
   2030 	case WM_T_ICH10:
   2031 	case WM_T_PCH:
   2032 	case WM_T_PCH2:
   2033 	case WM_T_PCH_LPT:
   2034 		/* Non-AMT based hardware can now take control from firmware */
   2035 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2036 			wm_get_hw_control(sc);
   2037 		break;
   2038 	default:
   2039 		break;
   2040 	}
   2041 
   2042 	/*
    2043 	 * Read the Ethernet address from the EEPROM, unless it was
    2044 	 * found first in the device properties.
   2045 	 */
   2046 	ea = prop_dictionary_get(dict, "mac-address");
   2047 	if (ea != NULL) {
   2048 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2049 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2050 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2051 	} else {
   2052 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2053 			aprint_error_dev(sc->sc_dev,
   2054 			    "unable to read Ethernet address\n");
   2055 			goto out;
   2056 		}
   2057 	}
   2058 
   2059 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2060 	    ether_sprintf(enaddr));
   2061 
   2062 	/*
   2063 	 * Read the config info from the EEPROM, and set up various
   2064 	 * bits in the control registers based on their contents.
   2065 	 */
   2066 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2067 	if (pn != NULL) {
   2068 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2069 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2070 	} else {
   2071 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2072 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2073 			goto out;
   2074 		}
   2075 	}
   2076 
   2077 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2078 	if (pn != NULL) {
   2079 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2080 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2081 	} else {
   2082 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2083 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2084 			goto out;
   2085 		}
   2086 	}
   2087 
   2088 	/* check for WM_F_WOL */
   2089 	switch (sc->sc_type) {
   2090 	case WM_T_82542_2_0:
   2091 	case WM_T_82542_2_1:
   2092 	case WM_T_82543:
   2093 		/* dummy? */
   2094 		eeprom_data = 0;
   2095 		apme_mask = NVM_CFG3_APME;
   2096 		break;
   2097 	case WM_T_82544:
   2098 		apme_mask = NVM_CFG2_82544_APM_EN;
   2099 		eeprom_data = cfg2;
   2100 		break;
   2101 	case WM_T_82546:
   2102 	case WM_T_82546_3:
   2103 	case WM_T_82571:
   2104 	case WM_T_82572:
   2105 	case WM_T_82573:
   2106 	case WM_T_82574:
   2107 	case WM_T_82583:
   2108 	case WM_T_80003:
   2109 	default:
   2110 		apme_mask = NVM_CFG3_APME;
   2111 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2112 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2113 		break;
   2114 	case WM_T_82575:
   2115 	case WM_T_82576:
   2116 	case WM_T_82580:
   2117 	case WM_T_I350:
   2118 	case WM_T_I354: /* XXX ok? */
   2119 	case WM_T_ICH8:
   2120 	case WM_T_ICH9:
   2121 	case WM_T_ICH10:
   2122 	case WM_T_PCH:
   2123 	case WM_T_PCH2:
   2124 	case WM_T_PCH_LPT:
   2125 		/* XXX The funcid should be checked on some devices */
   2126 		apme_mask = WUC_APME;
   2127 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2128 		break;
   2129 	}
   2130 
   2131 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2132 	if ((eeprom_data & apme_mask) != 0)
   2133 		sc->sc_flags |= WM_F_WOL;
   2134 #ifdef WM_DEBUG
   2135 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2136 		printf("WOL\n");
   2137 #endif
   2138 
   2139 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2140 		/* Check NVM for autonegotiation */
   2141 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2142 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2143 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2144 		}
   2145 	}
   2146 
   2147 	/*
    2148 	 * XXX need special handling for some multiple-port cards
    2149 	 * to disable a particular port.
   2150 	 */
   2151 
   2152 	if (sc->sc_type >= WM_T_82544) {
   2153 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2154 		if (pn != NULL) {
   2155 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2156 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2157 		} else {
   2158 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2159 				aprint_error_dev(sc->sc_dev,
   2160 				    "unable to read SWDPIN\n");
   2161 				goto out;
   2162 			}
   2163 		}
   2164 	}
   2165 
   2166 	if (cfg1 & NVM_CFG1_ILOS)
   2167 		sc->sc_ctrl |= CTRL_ILOS;
   2168 
   2169 	/*
   2170 	 * XXX
    2171 	 * This code isn't correct because pins 2 and 3 are located
    2172 	 * at different positions on newer chips. Check all the datasheets.
    2173 	 *
    2174 	 * Until this is resolved, only apply it to chips <= 82580.
   2175 	 */
   2176 	if (sc->sc_type <= WM_T_82580) {
   2177 		if (sc->sc_type >= WM_T_82544) {
   2178 			sc->sc_ctrl |=
   2179 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2180 			    CTRL_SWDPIO_SHIFT;
   2181 			sc->sc_ctrl |=
   2182 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2183 			    CTRL_SWDPINS_SHIFT;
   2184 		} else {
   2185 			sc->sc_ctrl |=
   2186 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2187 			    CTRL_SWDPIO_SHIFT;
   2188 		}
   2189 	}
   2190 
   2191 	/* XXX For other than 82580? */
   2192 	if (sc->sc_type == WM_T_82580) {
   2193 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2194 		printf("CFG3 = %08x\n", (uint32_t)nvmword);
   2195 		if (nvmword & __BIT(13)) {
   2196 			printf("SET ILOS\n");
   2197 			sc->sc_ctrl |= CTRL_ILOS;
   2198 		}
   2199 	}
   2200 
   2201 #if 0
   2202 	if (sc->sc_type >= WM_T_82544) {
   2203 		if (cfg1 & NVM_CFG1_IPS0)
   2204 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2205 		if (cfg1 & NVM_CFG1_IPS1)
   2206 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2207 		sc->sc_ctrl_ext |=
   2208 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2209 		    CTRL_EXT_SWDPIO_SHIFT;
   2210 		sc->sc_ctrl_ext |=
   2211 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2212 		    CTRL_EXT_SWDPINS_SHIFT;
   2213 	} else {
   2214 		sc->sc_ctrl_ext |=
   2215 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2216 		    CTRL_EXT_SWDPIO_SHIFT;
   2217 	}
   2218 #endif
   2219 
   2220 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2221 #if 0
   2222 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2223 #endif
   2224 
   2225 	if (sc->sc_type == WM_T_PCH) {
   2226 		uint16_t val;
   2227 
   2228 		/* Save the NVM K1 bit setting */
   2229 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2230 
   2231 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2232 			sc->sc_nvm_k1_enabled = 1;
   2233 		else
   2234 			sc->sc_nvm_k1_enabled = 0;
   2235 	}
   2236 
   2237 	/*
    2238 	 * Determine whether we're in TBI, GMII or SGMII mode, and
    2239 	 * initialize the media structures accordingly.
   2240 	 */
   2241 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2242 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2243 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2244 	    || sc->sc_type == WM_T_82573
   2245 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2246 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2247 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2248 	} else if (sc->sc_type < WM_T_82543 ||
   2249 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2250 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2251 			aprint_error_dev(sc->sc_dev,
   2252 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2253 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2254 		}
   2255 		wm_tbi_mediainit(sc);
   2256 	} else {
   2257 		switch (sc->sc_type) {
   2258 		case WM_T_82575:
   2259 		case WM_T_82576:
   2260 		case WM_T_82580:
   2261 		case WM_T_I350:
   2262 		case WM_T_I354:
   2263 		case WM_T_I210:
   2264 		case WM_T_I211:
   2265 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2266 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2267 			switch (link_mode) {
   2268 			case CTRL_EXT_LINK_MODE_1000KX:
   2269 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2270 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2271 				break;
   2272 			case CTRL_EXT_LINK_MODE_SGMII:
   2273 				if (wm_sgmii_uses_mdio(sc)) {
   2274 					aprint_verbose_dev(sc->sc_dev,
   2275 					    "SGMII(MDIO)\n");
   2276 					sc->sc_flags |= WM_F_SGMII;
   2277 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2278 					break;
   2279 				}
   2280 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2281 				/*FALLTHROUGH*/
   2282 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2283 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2284 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2285 					if (link_mode
   2286 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2287 						sc->sc_mediatype
   2288 						    = WM_MEDIATYPE_COPPER;
   2289 						sc->sc_flags |= WM_F_SGMII;
   2290 					} else {
   2291 						sc->sc_mediatype
   2292 						    = WM_MEDIATYPE_SERDES;
   2293 						aprint_verbose_dev(sc->sc_dev,
   2294 						    "SERDES\n");
   2295 					}
   2296 					break;
   2297 				}
   2298 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2299 					aprint_verbose_dev(sc->sc_dev,
   2300 					    "SERDES\n");
   2301 
   2302 				/* Change current link mode setting */
   2303 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2304 				switch (sc->sc_mediatype) {
   2305 				case WM_MEDIATYPE_COPPER:
   2306 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2307 					break;
   2308 				case WM_MEDIATYPE_SERDES:
   2309 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2310 					break;
   2311 				default:
   2312 					break;
   2313 				}
   2314 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2315 				break;
   2316 			case CTRL_EXT_LINK_MODE_GMII:
   2317 			default:
   2318 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2319 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2320 				break;
   2321 			}
   2322 
    2323 			/* Enable the I2C interface only when using SGMII. */
    2324 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2325 				reg |= CTRL_EXT_I2C_ENA;
    2326 			else
    2327 				reg &= ~CTRL_EXT_I2C_ENA;
   2328 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2329 
   2330 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2331 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2332 			else
   2333 				wm_tbi_mediainit(sc);
   2334 			break;
   2335 		default:
   2336 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2337 				aprint_error_dev(sc->sc_dev,
   2338 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2339 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2340 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2341 		}
   2342 	}
   2343 
   2344 	ifp = &sc->sc_ethercom.ec_if;
   2345 	xname = device_xname(sc->sc_dev);
   2346 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2347 	ifp->if_softc = sc;
   2348 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2349 	ifp->if_ioctl = wm_ioctl;
   2350 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2351 		ifp->if_start = wm_nq_start;
   2352 	else
   2353 		ifp->if_start = wm_start;
   2354 	ifp->if_watchdog = wm_watchdog;
   2355 	ifp->if_init = wm_init;
   2356 	ifp->if_stop = wm_stop;
   2357 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2358 	IFQ_SET_READY(&ifp->if_snd);
   2359 
   2360 	/* Check for jumbo frame */
   2361 	switch (sc->sc_type) {
   2362 	case WM_T_82573:
   2363 		/* XXX limited to 9234 if ASPM is disabled */
   2364 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2365 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2366 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2367 		break;
   2368 	case WM_T_82571:
   2369 	case WM_T_82572:
   2370 	case WM_T_82574:
   2371 	case WM_T_82575:
   2372 	case WM_T_82576:
   2373 	case WM_T_82580:
   2374 	case WM_T_I350:
    2375 	case WM_T_I354: /* XXX ok? */
   2376 	case WM_T_I210:
   2377 	case WM_T_I211:
   2378 	case WM_T_80003:
   2379 	case WM_T_ICH9:
   2380 	case WM_T_ICH10:
   2381 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2382 	case WM_T_PCH_LPT:
   2383 		/* XXX limited to 9234 */
   2384 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2385 		break;
   2386 	case WM_T_PCH:
   2387 		/* XXX limited to 4096 */
   2388 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2389 		break;
   2390 	case WM_T_82542_2_0:
   2391 	case WM_T_82542_2_1:
   2392 	case WM_T_82583:
   2393 	case WM_T_ICH8:
   2394 		/* No support for jumbo frame */
   2395 		break;
   2396 	default:
   2397 		/* ETHER_MAX_LEN_JUMBO */
   2398 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2399 		break;
   2400 	}
   2401 
    2402 	/* If we're an i82543 or greater, we can support VLANs. */
   2403 	if (sc->sc_type >= WM_T_82543)
   2404 		sc->sc_ethercom.ec_capabilities |=
   2405 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2406 
   2407 	/*
    2408 	 * We can perform TCPv4 and UDPv4 checksums in hardware.  Only
    2409 	 * on i82543 and later.
   2410 	 */
   2411 	if (sc->sc_type >= WM_T_82543) {
   2412 		ifp->if_capabilities |=
   2413 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2414 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2415 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2416 		    IFCAP_CSUM_TCPv6_Tx |
   2417 		    IFCAP_CSUM_UDPv6_Tx;
   2418 	}
   2419 
   2420 	/*
   2421 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2422 	 *
   2423 	 *	82541GI (8086:1076) ... no
   2424 	 *	82572EI (8086:10b9) ... yes
   2425 	 */
   2426 	if (sc->sc_type >= WM_T_82571) {
   2427 		ifp->if_capabilities |=
   2428 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2429 	}
   2430 
   2431 	/*
    2432 	 * If we're an i82544 or greater (except i82547), we can do
   2433 	 * TCP segmentation offload.
   2434 	 */
   2435 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2436 		ifp->if_capabilities |= IFCAP_TSOv4;
   2437 	}
   2438 
   2439 	if (sc->sc_type >= WM_T_82571) {
   2440 		ifp->if_capabilities |= IFCAP_TSOv6;
   2441 	}
   2442 
   2443 #ifdef WM_MPSAFE
   2444 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2445 #else
   2446 	sc->sc_core_lock = NULL;
   2447 #endif
   2448 
   2449 	/* Attach the interface. */
   2450 	if_attach(ifp);
   2451 	ether_ifattach(ifp, enaddr);
   2452 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2453 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2454 			  RND_FLAG_DEFAULT);
   2455 
   2456 #ifdef WM_EVENT_COUNTERS
   2457 	/* Attach event counters. */
   2458 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2459 	    NULL, xname, "txsstall");
   2460 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2461 	    NULL, xname, "txdstall");
   2462 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2463 	    NULL, xname, "txfifo_stall");
   2464 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2465 	    NULL, xname, "txdw");
   2466 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2467 	    NULL, xname, "txqe");
   2468 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2469 	    NULL, xname, "rxintr");
   2470 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2471 	    NULL, xname, "linkintr");
   2472 
   2473 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2474 	    NULL, xname, "rxipsum");
   2475 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2476 	    NULL, xname, "rxtusum");
   2477 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2478 	    NULL, xname, "txipsum");
   2479 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2480 	    NULL, xname, "txtusum");
   2481 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2482 	    NULL, xname, "txtusum6");
   2483 
   2484 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2485 	    NULL, xname, "txtso");
   2486 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2487 	    NULL, xname, "txtso6");
   2488 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2489 	    NULL, xname, "txtsopain");
   2490 
   2491 	for (i = 0; i < WM_NTXSEGS; i++) {
   2492 		snprintf(wm_txseg_evcnt_names[i],
   2493 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2494 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2495 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2496 	}
   2497 
   2498 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2499 	    NULL, xname, "txdrop");
   2500 
   2501 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2502 	    NULL, xname, "tu");
   2503 
   2504 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2505 	    NULL, xname, "tx_xoff");
   2506 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2507 	    NULL, xname, "tx_xon");
   2508 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2509 	    NULL, xname, "rx_xoff");
   2510 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2511 	    NULL, xname, "rx_xon");
   2512 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2513 	    NULL, xname, "rx_macctl");
   2514 #endif /* WM_EVENT_COUNTERS */
   2515 
   2516 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2517 		pmf_class_network_register(self, ifp);
   2518 	else
   2519 		aprint_error_dev(self, "couldn't establish power handler\n");
   2520 
   2521 	sc->sc_flags |= WM_F_ATTACHED;
   2522  out:
   2523 	return;
   2524 }
   2525 
   2526 /* The detach function (ca_detach) */
   2527 static int
   2528 wm_detach(device_t self, int flags __unused)
   2529 {
   2530 	struct wm_softc *sc = device_private(self);
   2531 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2532 	int i;
   2533 #ifndef WM_MPSAFE
   2534 	int s;
   2535 #endif
   2536 
   2537 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2538 		return 0;
   2539 
   2540 #ifndef WM_MPSAFE
   2541 	s = splnet();
   2542 #endif
   2543 	/* Stop the interface. Callouts are stopped in it. */
   2544 	wm_stop(ifp, 1);
   2545 
   2546 #ifndef WM_MPSAFE
   2547 	splx(s);
   2548 #endif
   2549 
   2550 	pmf_device_deregister(self);
   2551 
   2552 	/* Tell the firmware about the release */
   2553 	WM_CORE_LOCK(sc);
   2554 	wm_release_manageability(sc);
   2555 	wm_release_hw_control(sc);
   2556 	WM_CORE_UNLOCK(sc);
   2557 
   2558 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2559 
   2560 	/* Delete all remaining media. */
   2561 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2562 
   2563 	ether_ifdetach(ifp);
   2564 	if_detach(ifp);
   2565 
   2566 
   2567 	/* Unload RX dmamaps and free mbufs */
   2568 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   2569 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   2570 		WM_RX_LOCK(rxq);
   2571 		wm_rxdrain(rxq);
   2572 		WM_RX_UNLOCK(rxq);
   2573 	}
   2574 	/* Must unlock here */
   2575 
   2576 	wm_free_txrx_queues(sc);
   2577 
   2578 	/* Disestablish the interrupt handler */
   2579 	for (i = 0; i < sc->sc_nintrs; i++) {
   2580 		if (sc->sc_ihs[i] != NULL) {
   2581 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2582 			sc->sc_ihs[i] = NULL;
   2583 		}
   2584 	}
   2585 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2586 
   2587 	/* Unmap the registers */
   2588 	if (sc->sc_ss) {
   2589 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2590 		sc->sc_ss = 0;
   2591 	}
   2592 	if (sc->sc_ios) {
   2593 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2594 		sc->sc_ios = 0;
   2595 	}
   2596 	if (sc->sc_flashs) {
   2597 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2598 		sc->sc_flashs = 0;
   2599 	}
   2600 
   2601 	if (sc->sc_core_lock)
   2602 		mutex_obj_free(sc->sc_core_lock);
   2603 
   2604 	return 0;
   2605 }
   2606 
   2607 static bool
   2608 wm_suspend(device_t self, const pmf_qual_t *qual)
   2609 {
   2610 	struct wm_softc *sc = device_private(self);
   2611 
   2612 	wm_release_manageability(sc);
   2613 	wm_release_hw_control(sc);
   2614 #ifdef WM_WOL
   2615 	wm_enable_wakeup(sc);
   2616 #endif
   2617 
   2618 	return true;
   2619 }
   2620 
   2621 static bool
   2622 wm_resume(device_t self, const pmf_qual_t *qual)
   2623 {
   2624 	struct wm_softc *sc = device_private(self);
   2625 
   2626 	wm_init_manageability(sc);
   2627 
   2628 	return true;
   2629 }
   2630 
   2631 /*
   2632  * wm_watchdog:		[ifnet interface function]
   2633  *
   2634  *	Watchdog timer handler.
   2635  */
   2636 static void
   2637 wm_watchdog(struct ifnet *ifp)
   2638 {
   2639 	struct wm_softc *sc = ifp->if_softc;
   2640 	struct wm_txqueue *txq = &sc->sc_txq[0];
   2641 
   2642 	/*
   2643 	 * Since we're using delayed interrupts, sweep up
   2644 	 * before we report an error.
   2645 	 */
   2646 	WM_TX_LOCK(txq);
   2647 	wm_txeof(sc);
   2648 	WM_TX_UNLOCK(txq);
   2649 
   2650 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2651 #ifdef WM_DEBUG
   2652 		int i, j;
   2653 		struct wm_txsoft *txs;
   2654 #endif
   2655 		log(LOG_ERR,
   2656 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2657 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2658 		    txq->txq_next);
   2659 		ifp->if_oerrors++;
   2660 #ifdef WM_DEBUG
   2661 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2662 		    i = WM_NEXTTXS(txq, i)) {
   2663 		    txs = &txq->txq_soft[i];
   2664 		    printf("txs %d tx %d -> %d\n",
   2665 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2666 		    for (j = txs->txs_firstdesc; ;
   2667 			j = WM_NEXTTX(txq, j)) {
   2668 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2669 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2670 			printf("\t %#08x%08x\n",
   2671 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2672 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2673 			if (j == txs->txs_lastdesc)
   2674 				break;
   2675 			}
   2676 		}
   2677 #endif
   2678 		/* Reset the interface. */
   2679 		(void) wm_init(ifp);
   2680 	}
   2681 
   2682 	/* Try to get more packets going. */
   2683 	ifp->if_start(ifp);
   2684 }
   2685 
   2686 /*
   2687  * wm_tick:
   2688  *
   2689  *	One second timer, used to check link status, sweep up
   2690  *	completed transmit jobs, etc.
   2691  */
   2692 static void
   2693 wm_tick(void *arg)
   2694 {
   2695 	struct wm_softc *sc = arg;
   2696 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2697 #ifndef WM_MPSAFE
   2698 	int s;
   2699 
   2700 	s = splnet();
   2701 #endif
   2702 
   2703 	WM_CORE_LOCK(sc);
   2704 
   2705 	if (sc->sc_stopping)
   2706 		goto out;
   2707 
   2708 	if (sc->sc_type >= WM_T_82542_2_1) {
   2709 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2710 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2711 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2712 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2713 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2714 	}
   2715 
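	/*
	 * These statistics registers clear on read, so each read below
	 * picks up only the events since the previous tick.
	 */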
   2716 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2717 	ifp->if_ierrors += 0ULL /* ensure quad_t */
    2718 	    + CSR_READ(sc, WMREG_CRCERRS)
   2719 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2720 	    + CSR_READ(sc, WMREG_SYMERRC)
   2721 	    + CSR_READ(sc, WMREG_RXERRC)
   2722 	    + CSR_READ(sc, WMREG_SEC)
   2723 	    + CSR_READ(sc, WMREG_CEXTERR)
   2724 	    + CSR_READ(sc, WMREG_RLEC);
   2725 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2726 
   2727 	if (sc->sc_flags & WM_F_HAS_MII)
   2728 		mii_tick(&sc->sc_mii);
   2729 	else if ((sc->sc_type >= WM_T_82575)
   2730 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2731 		wm_serdes_tick(sc);
   2732 	else
   2733 		wm_tbi_tick(sc);
   2734 
   2735 out:
   2736 	WM_CORE_UNLOCK(sc);
   2737 #ifndef WM_MPSAFE
   2738 	splx(s);
   2739 #endif
   2740 
   2741 	if (!sc->sc_stopping)
   2742 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2743 }
   2744 
   2745 static int
   2746 wm_ifflags_cb(struct ethercom *ec)
   2747 {
   2748 	struct ifnet *ifp = &ec->ec_if;
   2749 	struct wm_softc *sc = ifp->if_softc;
   2750 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2751 	int rc = 0;
   2752 
   2753 	WM_CORE_LOCK(sc);
   2754 
   2755 	if (change != 0)
   2756 		sc->sc_if_flags = ifp->if_flags;
   2757 
   2758 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2759 		rc = ENETRESET;
   2760 		goto out;
   2761 	}
   2762 
   2763 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2764 		wm_set_filter(sc);
   2765 
   2766 	wm_set_vlan(sc);
   2767 
   2768 out:
   2769 	WM_CORE_UNLOCK(sc);
   2770 
   2771 	return rc;
   2772 }
   2773 
   2774 /*
   2775  * wm_ioctl:		[ifnet interface function]
   2776  *
   2777  *	Handle control requests from the operator.
   2778  */
   2779 static int
   2780 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2781 {
   2782 	struct wm_softc *sc = ifp->if_softc;
   2783 	struct ifreq *ifr = (struct ifreq *) data;
   2784 	struct ifaddr *ifa = (struct ifaddr *)data;
   2785 	struct sockaddr_dl *sdl;
   2786 	int s, error;
   2787 
   2788 #ifndef WM_MPSAFE
   2789 	s = splnet();
   2790 #endif
   2791 	switch (cmd) {
   2792 	case SIOCSIFMEDIA:
   2793 	case SIOCGIFMEDIA:
   2794 		WM_CORE_LOCK(sc);
   2795 		/* Flow control requires full-duplex mode. */
   2796 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2797 		    (ifr->ifr_media & IFM_FDX) == 0)
   2798 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2799 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2800 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2801 				/* We can do both TXPAUSE and RXPAUSE. */
   2802 				ifr->ifr_media |=
   2803 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2804 			}
   2805 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2806 		}
   2807 		WM_CORE_UNLOCK(sc);
   2808 #ifdef WM_MPSAFE
   2809 		s = splnet();
   2810 #endif
   2811 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2812 #ifdef WM_MPSAFE
   2813 		splx(s);
   2814 #endif
   2815 		break;
   2816 	case SIOCINITIFADDR:
   2817 		WM_CORE_LOCK(sc);
   2818 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2819 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2820 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2821 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2822 			/* unicast address is first multicast entry */
   2823 			wm_set_filter(sc);
   2824 			error = 0;
   2825 			WM_CORE_UNLOCK(sc);
   2826 			break;
   2827 		}
   2828 		WM_CORE_UNLOCK(sc);
   2829 		/*FALLTHROUGH*/
   2830 	default:
   2831 #ifdef WM_MPSAFE
   2832 		s = splnet();
   2833 #endif
   2834 		/* It may call wm_start, so unlock here */
   2835 		error = ether_ioctl(ifp, cmd, data);
   2836 #ifdef WM_MPSAFE
   2837 		splx(s);
   2838 #endif
   2839 		if (error != ENETRESET)
   2840 			break;
   2841 
   2842 		error = 0;
   2843 
   2844 		if (cmd == SIOCSIFCAP) {
   2845 			error = (*ifp->if_init)(ifp);
   2846 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2847 			;
   2848 		else if (ifp->if_flags & IFF_RUNNING) {
   2849 			/*
   2850 			 * Multicast list has changed; set the hardware filter
   2851 			 * accordingly.
   2852 			 */
   2853 			WM_CORE_LOCK(sc);
   2854 			wm_set_filter(sc);
   2855 			WM_CORE_UNLOCK(sc);
   2856 		}
   2857 		break;
   2858 	}
   2859 
   2860 #ifndef WM_MPSAFE
   2861 	splx(s);
   2862 #endif
   2863 	return error;
   2864 }
   2865 
   2866 /* MAC address related */
   2867 
   2868 /*
    2869  * Get the offset of the MAC address and return it.
    2870  * If an error occurred, use offset 0.
   2871  */
   2872 static uint16_t
   2873 wm_check_alt_mac_addr(struct wm_softc *sc)
   2874 {
   2875 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2876 	uint16_t offset = NVM_OFF_MACADDR;
   2877 
   2878 	/* Try to read alternative MAC address pointer */
   2879 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2880 		return 0;
   2881 
    2882 	/* Check whether the pointer is valid. */
   2883 	if ((offset == 0x0000) || (offset == 0xffff))
   2884 		return 0;
   2885 
   2886 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2887 	/*
    2888 	 * Check whether the alternative MAC address is valid.
    2889 	 * Some cards have a non-0xffff pointer but don't actually
    2890 	 * use an alternative MAC address.
    2891 	 *
    2892 	 * A valid unicast address has the multicast (group) bit clear.
   2893 	 */
   2894 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2895 		if (((myea[0] & 0xff) & 0x01) == 0)
   2896 			return offset; /* Found */
   2897 
   2898 	/* Not found */
   2899 	return 0;
   2900 }
   2901 
   2902 static int
   2903 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2904 {
   2905 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2906 	uint16_t offset = NVM_OFF_MACADDR;
   2907 	int do_invert = 0;
   2908 
   2909 	switch (sc->sc_type) {
   2910 	case WM_T_82580:
   2911 	case WM_T_I350:
   2912 	case WM_T_I354:
   2913 		/* EEPROM Top Level Partitioning */
   2914 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2915 		break;
   2916 	case WM_T_82571:
   2917 	case WM_T_82575:
   2918 	case WM_T_82576:
   2919 	case WM_T_80003:
   2920 	case WM_T_I210:
   2921 	case WM_T_I211:
   2922 		offset = wm_check_alt_mac_addr(sc);
   2923 		if (offset == 0)
   2924 			if ((sc->sc_funcid & 0x01) == 1)
   2925 				do_invert = 1;
   2926 		break;
   2927 	default:
   2928 		if ((sc->sc_funcid & 0x01) == 1)
   2929 			do_invert = 1;
   2930 		break;
   2931 	}
   2932 
   2933 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2934 		myea) != 0)
   2935 		goto bad;
   2936 
   2937 	enaddr[0] = myea[0] & 0xff;
   2938 	enaddr[1] = myea[0] >> 8;
   2939 	enaddr[2] = myea[1] & 0xff;
   2940 	enaddr[3] = myea[1] >> 8;
   2941 	enaddr[4] = myea[2] & 0xff;
   2942 	enaddr[5] = myea[2] >> 8;
   2943 
   2944 	/*
   2945 	 * Toggle the LSB of the MAC address on the second port
   2946 	 * of some dual port cards.
   2947 	 */
   2948 	if (do_invert != 0)
   2949 		enaddr[5] ^= 1;
   2950 
   2951 	return 0;
   2952 
   2953  bad:
   2954 	return -1;
   2955 }
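/*
 * Worked example (hypothetical address): with NVM words
 * myea[] = { 0x1b00, 0x3c21, 0x5e4d }, the unpacking above yields
 * enaddr = 00:1b:21:3c:4d:5e.  With do_invert set (e.g. on the second
 * port of some dual port cards), the last byte becomes 0x5f.
 */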
   2956 
   2957 /*
   2958  * wm_set_ral:
   2959  *
    2960  *	Set an entry in the receive address list.
   2961  */
   2962 static void
   2963 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2964 {
   2965 	uint32_t ral_lo, ral_hi;
   2966 
   2967 	if (enaddr != NULL) {
   2968 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2969 		    (enaddr[3] << 24);
   2970 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2971 		ral_hi |= RAL_AV;
   2972 	} else {
   2973 		ral_lo = 0;
   2974 		ral_hi = 0;
   2975 	}
   2976 
   2977 	if (sc->sc_type >= WM_T_82544) {
   2978 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2979 		    ral_lo);
   2980 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2981 		    ral_hi);
   2982 	} else {
   2983 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2984 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2985 	}
   2986 }
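/*
 * Packing sketch (same hypothetical address as above): for
 * 00:1b:21:3c:4d:5e, ral_lo = 0x3c211b00 and ral_hi = 0x5e4d | RAL_AV:
 * the first four bytes go into the low register, the last two plus the
 * address-valid bit into the high register.
 */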
   2987 
   2988 /*
   2989  * wm_mchash:
   2990  *
   2991  *	Compute the hash of the multicast address for the 4096-bit
   2992  *	multicast filter.
   2993  */
   2994 static uint32_t
   2995 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2996 {
   2997 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2998 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2999 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3000 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3001 	uint32_t hash;
   3002 
   3003 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3004 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3005 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   3006 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3007 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3008 		return (hash & 0x3ff);
   3009 	}
   3010 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3011 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3012 
   3013 	return (hash & 0xfff);
   3014 }
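/*
 * Worked example (assuming mchash_type 0 on a non-ICH/PCH chip): for a
 * multicast address with enaddr[4] = 0xa5 and enaddr[5] = 0x3c,
 *
 *	hash = (0xa5 >> 4) | (0x3c << 4) = 0x00a | 0x3c0 = 0x3ca
 *
 * wm_set_filter() then uses reg = hash >> 5 = 30 and bit = hash & 0x1f
 * = 10, i.e. bit 10 of MTA register 30.
 */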
   3015 
   3016 /*
   3017  * wm_set_filter:
   3018  *
   3019  *	Set up the receive filter.
   3020  */
   3021 static void
   3022 wm_set_filter(struct wm_softc *sc)
   3023 {
   3024 	struct ethercom *ec = &sc->sc_ethercom;
   3025 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3026 	struct ether_multi *enm;
   3027 	struct ether_multistep step;
   3028 	bus_addr_t mta_reg;
   3029 	uint32_t hash, reg, bit;
   3030 	int i, size;
   3031 
   3032 	if (sc->sc_type >= WM_T_82544)
   3033 		mta_reg = WMREG_CORDOVA_MTA;
   3034 	else
   3035 		mta_reg = WMREG_MTA;
   3036 
   3037 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3038 
   3039 	if (ifp->if_flags & IFF_BROADCAST)
   3040 		sc->sc_rctl |= RCTL_BAM;
   3041 	if (ifp->if_flags & IFF_PROMISC) {
   3042 		sc->sc_rctl |= RCTL_UPE;
   3043 		goto allmulti;
   3044 	}
   3045 
   3046 	/*
   3047 	 * Set the station address in the first RAL slot, and
   3048 	 * clear the remaining slots.
   3049 	 */
   3050 	if (sc->sc_type == WM_T_ICH8)
    3051 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3052 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3053 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3054 	    || (sc->sc_type == WM_T_PCH_LPT))
   3055 		size = WM_RAL_TABSIZE_ICH8;
   3056 	else if (sc->sc_type == WM_T_82575)
   3057 		size = WM_RAL_TABSIZE_82575;
   3058 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3059 		size = WM_RAL_TABSIZE_82576;
   3060 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3061 		size = WM_RAL_TABSIZE_I350;
   3062 	else
   3063 		size = WM_RAL_TABSIZE;
   3064 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3065 	for (i = 1; i < size; i++)
   3066 		wm_set_ral(sc, NULL, i);
   3067 
   3068 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3069 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3070 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   3071 		size = WM_ICH8_MC_TABSIZE;
   3072 	else
   3073 		size = WM_MC_TABSIZE;
   3074 	/* Clear out the multicast table. */
   3075 	for (i = 0; i < size; i++)
   3076 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3077 
   3078 	ETHER_FIRST_MULTI(step, ec, enm);
   3079 	while (enm != NULL) {
   3080 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3081 			/*
   3082 			 * We must listen to a range of multicast addresses.
   3083 			 * For now, just accept all multicasts, rather than
   3084 			 * trying to set only those filter bits needed to match
   3085 			 * the range.  (At this time, the only use of address
   3086 			 * ranges is for IP multicast routing, for which the
   3087 			 * range is big enough to require all bits set.)
   3088 			 */
   3089 			goto allmulti;
   3090 		}
   3091 
   3092 		hash = wm_mchash(sc, enm->enm_addrlo);
   3093 
   3094 		reg = (hash >> 5);
   3095 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3096 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3097 		    || (sc->sc_type == WM_T_PCH2)
   3098 		    || (sc->sc_type == WM_T_PCH_LPT))
   3099 			reg &= 0x1f;
   3100 		else
   3101 			reg &= 0x7f;
   3102 		bit = hash & 0x1f;
   3103 
   3104 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3105 		hash |= 1U << bit;
   3106 
   3107 		/* XXX Hardware bug?? */
   3108 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3109 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3110 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3111 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3112 		} else
   3113 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3114 
   3115 		ETHER_NEXT_MULTI(step, enm);
   3116 	}
   3117 
   3118 	ifp->if_flags &= ~IFF_ALLMULTI;
   3119 	goto setit;
   3120 
   3121  allmulti:
   3122 	ifp->if_flags |= IFF_ALLMULTI;
   3123 	sc->sc_rctl |= RCTL_MPE;
   3124 
   3125  setit:
   3126 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3127 }
   3128 
   3129 /* Reset and init related */
   3130 
   3131 static void
   3132 wm_set_vlan(struct wm_softc *sc)
   3133 {
   3134 	/* Deal with VLAN enables. */
   3135 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3136 		sc->sc_ctrl |= CTRL_VME;
   3137 	else
   3138 		sc->sc_ctrl &= ~CTRL_VME;
   3139 
   3140 	/* Write the control registers. */
   3141 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3142 }
   3143 
   3144 static void
   3145 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3146 {
   3147 	uint32_t gcr;
   3148 	pcireg_t ctrl2;
   3149 
   3150 	gcr = CSR_READ(sc, WMREG_GCR);
   3151 
    3152 	/* Only take action if the timeout value is at its default of 0 */
   3153 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3154 		goto out;
   3155 
   3156 	if ((gcr & GCR_CAP_VER2) == 0) {
   3157 		gcr |= GCR_CMPL_TMOUT_10MS;
   3158 		goto out;
   3159 	}
   3160 
   3161 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3162 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3163 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3164 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3165 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3166 
   3167 out:
   3168 	/* Disable completion timeout resend */
   3169 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3170 
   3171 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3172 }
   3173 
   3174 void
   3175 wm_get_auto_rd_done(struct wm_softc *sc)
   3176 {
   3177 	int i;
   3178 
   3179 	/* wait for eeprom to reload */
   3180 	switch (sc->sc_type) {
   3181 	case WM_T_82571:
   3182 	case WM_T_82572:
   3183 	case WM_T_82573:
   3184 	case WM_T_82574:
   3185 	case WM_T_82583:
   3186 	case WM_T_82575:
   3187 	case WM_T_82576:
   3188 	case WM_T_82580:
   3189 	case WM_T_I350:
   3190 	case WM_T_I354:
   3191 	case WM_T_I210:
   3192 	case WM_T_I211:
   3193 	case WM_T_80003:
   3194 	case WM_T_ICH8:
   3195 	case WM_T_ICH9:
   3196 		for (i = 0; i < 10; i++) {
   3197 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3198 				break;
   3199 			delay(1000);
   3200 		}
   3201 		if (i == 10) {
   3202 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3203 			    "complete\n", device_xname(sc->sc_dev));
   3204 		}
   3205 		break;
   3206 	default:
   3207 		break;
   3208 	}
   3209 }
   3210 
   3211 void
   3212 wm_lan_init_done(struct wm_softc *sc)
   3213 {
   3214 	uint32_t reg = 0;
   3215 	int i;
   3216 
   3217 	/* wait for eeprom to reload */
   3218 	switch (sc->sc_type) {
   3219 	case WM_T_ICH10:
   3220 	case WM_T_PCH:
   3221 	case WM_T_PCH2:
   3222 	case WM_T_PCH_LPT:
   3223 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3224 			reg = CSR_READ(sc, WMREG_STATUS);
   3225 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3226 				break;
   3227 			delay(100);
   3228 		}
   3229 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3230 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3231 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3232 		}
   3233 		break;
   3234 	default:
   3235 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3236 		    __func__);
   3237 		break;
   3238 	}
   3239 
   3240 	reg &= ~STATUS_LAN_INIT_DONE;
   3241 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3242 }
   3243 
   3244 void
   3245 wm_get_cfg_done(struct wm_softc *sc)
   3246 {
   3247 	int mask;
   3248 	uint32_t reg;
   3249 	int i;
   3250 
   3251 	/* wait for eeprom to reload */
   3252 	switch (sc->sc_type) {
   3253 	case WM_T_82542_2_0:
   3254 	case WM_T_82542_2_1:
   3255 		/* null */
   3256 		break;
   3257 	case WM_T_82543:
   3258 	case WM_T_82544:
   3259 	case WM_T_82540:
   3260 	case WM_T_82545:
   3261 	case WM_T_82545_3:
   3262 	case WM_T_82546:
   3263 	case WM_T_82546_3:
   3264 	case WM_T_82541:
   3265 	case WM_T_82541_2:
   3266 	case WM_T_82547:
   3267 	case WM_T_82547_2:
   3268 	case WM_T_82573:
   3269 	case WM_T_82574:
   3270 	case WM_T_82583:
   3271 		/* generic */
   3272 		delay(10*1000);
   3273 		break;
   3274 	case WM_T_80003:
   3275 	case WM_T_82571:
   3276 	case WM_T_82572:
   3277 	case WM_T_82575:
   3278 	case WM_T_82576:
   3279 	case WM_T_82580:
   3280 	case WM_T_I350:
   3281 	case WM_T_I354:
   3282 	case WM_T_I210:
   3283 	case WM_T_I211:
   3284 		if (sc->sc_type == WM_T_82571) {
    3285 			/* On the 82571, all functions share CFGDONE bit 0 */
   3286 			mask = EEMNGCTL_CFGDONE_0;
   3287 		} else
   3288 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3289 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3290 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3291 				break;
   3292 			delay(1000);
   3293 		}
   3294 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3295 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3296 				device_xname(sc->sc_dev), __func__));
   3297 		}
   3298 		break;
   3299 	case WM_T_ICH8:
   3300 	case WM_T_ICH9:
   3301 	case WM_T_ICH10:
   3302 	case WM_T_PCH:
   3303 	case WM_T_PCH2:
   3304 	case WM_T_PCH_LPT:
   3305 		delay(10*1000);
   3306 		if (sc->sc_type >= WM_T_ICH10)
   3307 			wm_lan_init_done(sc);
   3308 		else
   3309 			wm_get_auto_rd_done(sc);
   3310 
   3311 		reg = CSR_READ(sc, WMREG_STATUS);
   3312 		if ((reg & STATUS_PHYRA) != 0)
   3313 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3314 		break;
   3315 	default:
   3316 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3317 		    __func__);
   3318 		break;
   3319 	}
   3320 }
   3321 
   3322 /* Init hardware bits */
   3323 void
   3324 wm_initialize_hardware_bits(struct wm_softc *sc)
   3325 {
   3326 	uint32_t tarc0, tarc1, reg;
   3327 
   3328 	/* For 82571 variant, 80003 and ICHs */
   3329 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3330 	    || (sc->sc_type >= WM_T_80003)) {
   3331 
   3332 		/* Transmit Descriptor Control 0 */
   3333 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3334 		reg |= TXDCTL_COUNT_DESC;
   3335 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3336 
   3337 		/* Transmit Descriptor Control 1 */
   3338 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3339 		reg |= TXDCTL_COUNT_DESC;
   3340 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3341 
   3342 		/* TARC0 */
   3343 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3344 		switch (sc->sc_type) {
   3345 		case WM_T_82571:
   3346 		case WM_T_82572:
   3347 		case WM_T_82573:
   3348 		case WM_T_82574:
   3349 		case WM_T_82583:
   3350 		case WM_T_80003:
   3351 			/* Clear bits 30..27 */
   3352 			tarc0 &= ~__BITS(30, 27);
   3353 			break;
   3354 		default:
   3355 			break;
   3356 		}
   3357 
   3358 		switch (sc->sc_type) {
   3359 		case WM_T_82571:
   3360 		case WM_T_82572:
   3361 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3362 
   3363 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3364 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3365 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3366 			/* 8257[12] Errata No.7 */
    3367 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3368 
   3369 			/* TARC1 bit 28 */
   3370 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3371 				tarc1 &= ~__BIT(28);
   3372 			else
   3373 				tarc1 |= __BIT(28);
   3374 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3375 
   3376 			/*
   3377 			 * 8257[12] Errata No.13
    3378 			 * Disable Dynamic Clock Gating.
   3379 			 */
   3380 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3381 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3382 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3383 			break;
   3384 		case WM_T_82573:
   3385 		case WM_T_82574:
   3386 		case WM_T_82583:
   3387 			if ((sc->sc_type == WM_T_82574)
   3388 			    || (sc->sc_type == WM_T_82583))
   3389 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3390 
   3391 			/* Extended Device Control */
   3392 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3393 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3394 			reg |= __BIT(22);	/* Set bit 22 */
   3395 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3396 
   3397 			/* Device Control */
   3398 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3399 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3400 
   3401 			/* PCIe Control Register */
   3402 			/*
   3403 			 * 82573 Errata (unknown).
   3404 			 *
   3405 			 * 82574 Errata 25 and 82583 Errata 12
   3406 			 * "Dropped Rx Packets":
    3407 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3408 			 */
   3409 			reg = CSR_READ(sc, WMREG_GCR);
   3410 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3411 			CSR_WRITE(sc, WMREG_GCR, reg);
   3412 
   3413 			if ((sc->sc_type == WM_T_82574)
   3414 			    || (sc->sc_type == WM_T_82583)) {
   3415 				/*
   3416 				 * Document says this bit must be set for
   3417 				 * proper operation.
   3418 				 */
   3419 				reg = CSR_READ(sc, WMREG_GCR);
   3420 				reg |= __BIT(22);
   3421 				CSR_WRITE(sc, WMREG_GCR, reg);
   3422 
   3423 				/*
    3424 				 * Apply a workaround for a hardware erratum
    3425 				 * documented in the errata docs: it fixes an
    3426 				 * issue where error-prone or unreliable PCIe
    3427 				 * completions occur, particularly with ASPM
    3428 				 * enabled.  Without the fix, the issue can
    3429 				 * cause Tx timeouts.
   3430 				 */
   3431 				reg = CSR_READ(sc, WMREG_GCR2);
   3432 				reg |= __BIT(0);
   3433 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3434 			}
   3435 			break;
   3436 		case WM_T_80003:
   3437 			/* TARC0 */
   3438 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3439 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3440 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3441 
   3442 			/* TARC1 bit 28 */
   3443 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3444 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3445 				tarc1 &= ~__BIT(28);
   3446 			else
   3447 				tarc1 |= __BIT(28);
   3448 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3449 			break;
   3450 		case WM_T_ICH8:
   3451 		case WM_T_ICH9:
   3452 		case WM_T_ICH10:
   3453 		case WM_T_PCH:
   3454 		case WM_T_PCH2:
   3455 		case WM_T_PCH_LPT:
   3456 			/* TARC 0 */
   3457 			if (sc->sc_type == WM_T_ICH8) {
   3458 				/* Set TARC0 bits 29 and 28 */
   3459 				tarc0 |= __BITS(29, 28);
   3460 			}
   3461 			/* Set TARC0 bits 23,24,26,27 */
   3462 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3463 
   3464 			/* CTRL_EXT */
   3465 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3466 			reg |= __BIT(22);	/* Set bit 22 */
   3467 			/*
   3468 			 * Enable PHY low-power state when MAC is at D3
   3469 			 * w/o WoL
   3470 			 */
   3471 			if (sc->sc_type >= WM_T_PCH)
   3472 				reg |= CTRL_EXT_PHYPDEN;
   3473 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3474 
   3475 			/* TARC1 */
   3476 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3477 			/* bit 28 */
   3478 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3479 				tarc1 &= ~__BIT(28);
   3480 			else
   3481 				tarc1 |= __BIT(28);
   3482 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3483 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3484 
   3485 			/* Device Status */
   3486 			if (sc->sc_type == WM_T_ICH8) {
   3487 				reg = CSR_READ(sc, WMREG_STATUS);
   3488 				reg &= ~__BIT(31);
   3489 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3490 
   3491 			}
   3492 
   3493 			/*
   3494 			 * Work-around descriptor data corruption issue during
   3495 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3496 			 * capability.
   3497 			 */
   3498 			reg = CSR_READ(sc, WMREG_RFCTL);
   3499 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3500 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3501 			break;
   3502 		default:
   3503 			break;
   3504 		}
   3505 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3506 
   3507 		/*
   3508 		 * 8257[12] Errata No.52 and some others.
   3509 		 * Avoid RSS Hash Value bug.
   3510 		 */
   3511 		switch (sc->sc_type) {
   3512 		case WM_T_82571:
   3513 		case WM_T_82572:
   3514 		case WM_T_82573:
   3515 		case WM_T_80003:
   3516 		case WM_T_ICH8:
   3517 			reg = CSR_READ(sc, WMREG_RFCTL);
    3518 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3519 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3520 			break;
   3521 		default:
   3522 			break;
   3523 		}
   3524 	}
   3525 }
   3526 
   3527 static uint32_t
   3528 wm_rxpbs_adjust_82580(uint32_t val)
   3529 {
   3530 	uint32_t rv = 0;
   3531 
   3532 	if (val < __arraycount(wm_82580_rxpbs_table))
   3533 		rv = wm_82580_rxpbs_table[val];
   3534 
   3535 	return rv;
   3536 }
   3537 
   3538 /*
   3539  * wm_reset:
   3540  *
   3541  *	Reset the i82542 chip.
   3542  */
   3543 static void
   3544 wm_reset(struct wm_softc *sc)
   3545 {
   3546 	int phy_reset = 0;
   3547 	int i, error = 0;
   3548 	uint32_t reg, mask;
   3549 
   3550 	/*
   3551 	 * Allocate on-chip memory according to the MTU size.
   3552 	 * The Packet Buffer Allocation register must be written
   3553 	 * before the chip is reset.
   3554 	 */
   3555 	switch (sc->sc_type) {
   3556 	case WM_T_82547:
   3557 	case WM_T_82547_2:
   3558 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3559 		    PBA_22K : PBA_30K;
   3560 		for (i = 0; i < sc->sc_ntxqueues; i++) {
   3561 			struct wm_txqueue *txq = &sc->sc_txq[i];
   3562 			txq->txq_fifo_head = 0;
   3563 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3564 			txq->txq_fifo_size =
   3565 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3566 			txq->txq_fifo_stall = 0;
   3567 		}
   3568 		break;
   3569 	case WM_T_82571:
   3570 	case WM_T_82572:
    3571 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3572 	case WM_T_80003:
   3573 		sc->sc_pba = PBA_32K;
   3574 		break;
   3575 	case WM_T_82573:
   3576 		sc->sc_pba = PBA_12K;
   3577 		break;
   3578 	case WM_T_82574:
   3579 	case WM_T_82583:
   3580 		sc->sc_pba = PBA_20K;
   3581 		break;
   3582 	case WM_T_82576:
   3583 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3584 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3585 		break;
   3586 	case WM_T_82580:
   3587 	case WM_T_I350:
   3588 	case WM_T_I354:
   3589 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3590 		break;
   3591 	case WM_T_I210:
   3592 	case WM_T_I211:
   3593 		sc->sc_pba = PBA_34K;
   3594 		break;
   3595 	case WM_T_ICH8:
   3596 		/* Workaround for a bit corruption issue in FIFO memory */
   3597 		sc->sc_pba = PBA_8K;
   3598 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3599 		break;
   3600 	case WM_T_ICH9:
   3601 	case WM_T_ICH10:
   3602 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3603 		    PBA_14K : PBA_10K;
   3604 		break;
   3605 	case WM_T_PCH:
   3606 	case WM_T_PCH2:
   3607 	case WM_T_PCH_LPT:
   3608 		sc->sc_pba = PBA_26K;
   3609 		break;
   3610 	default:
   3611 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3612 		    PBA_40K : PBA_48K;
   3613 		break;
   3614 	}
   3615 	/*
    3616 	 * Only old or non-multiqueue devices have the PBA register.
   3617 	 * XXX Need special handling for 82575.
   3618 	 */
   3619 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3620 	    || (sc->sc_type == WM_T_82575))
   3621 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3622 
   3623 	/* Prevent the PCI-E bus from sticking */
   3624 	if (sc->sc_flags & WM_F_PCIE) {
   3625 		int timeout = 800;
   3626 
   3627 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3628 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3629 
   3630 		while (timeout--) {
   3631 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3632 			    == 0)
   3633 				break;
   3634 			delay(100);
   3635 		}
   3636 	}
   3637 
   3638 	/* Set the completion timeout for interface */
   3639 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3640 	    || (sc->sc_type == WM_T_82580)
   3641 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3642 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3643 		wm_set_pcie_completion_timeout(sc);
   3644 
   3645 	/* Clear interrupt */
   3646 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3647 	if (sc->sc_nintrs > 1) {
   3648 		if (sc->sc_type != WM_T_82574) {
   3649 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3650 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3651 		} else {
   3652 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3653 		}
   3654 	}
   3655 
   3656 	/* Stop the transmit and receive processes. */
   3657 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3658 	sc->sc_rctl &= ~RCTL_EN;
   3659 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3660 	CSR_WRITE_FLUSH(sc);
   3661 
   3662 	/* XXX set_tbi_sbp_82543() */
   3663 
   3664 	delay(10*1000);
   3665 
   3666 	/* Must acquire the MDIO ownership before MAC reset */
   3667 	switch (sc->sc_type) {
   3668 	case WM_T_82573:
   3669 	case WM_T_82574:
   3670 	case WM_T_82583:
   3671 		error = wm_get_hw_semaphore_82573(sc);
   3672 		break;
   3673 	default:
   3674 		break;
   3675 	}
   3676 
   3677 	/*
   3678 	 * 82541 Errata 29? & 82547 Errata 28?
   3679 	 * See also the description about PHY_RST bit in CTRL register
   3680 	 * in 8254x_GBe_SDM.pdf.
   3681 	 */
   3682 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3683 		CSR_WRITE(sc, WMREG_CTRL,
   3684 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3685 		CSR_WRITE_FLUSH(sc);
   3686 		delay(5000);
   3687 	}
   3688 
   3689 	switch (sc->sc_type) {
   3690 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3691 	case WM_T_82541:
   3692 	case WM_T_82541_2:
   3693 	case WM_T_82547:
   3694 	case WM_T_82547_2:
   3695 		/*
   3696 		 * On some chipsets, a reset through a memory-mapped write
   3697 		 * cycle can cause the chip to reset before completing the
    3698 		 * write cycle.  This causes a major headache that can be
   3699 		 * avoided by issuing the reset via indirect register writes
   3700 		 * through I/O space.
   3701 		 *
   3702 		 * So, if we successfully mapped the I/O BAR at attach time,
   3703 		 * use that.  Otherwise, try our luck with a memory-mapped
   3704 		 * reset.
   3705 		 */
   3706 		if (sc->sc_flags & WM_F_IOH_VALID)
   3707 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3708 		else
   3709 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3710 		break;
   3711 	case WM_T_82545_3:
   3712 	case WM_T_82546_3:
   3713 		/* Use the shadow control register on these chips. */
   3714 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3715 		break;
   3716 	case WM_T_80003:
   3717 		mask = swfwphysem[sc->sc_funcid];
   3718 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3719 		wm_get_swfw_semaphore(sc, mask);
   3720 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3721 		wm_put_swfw_semaphore(sc, mask);
   3722 		break;
   3723 	case WM_T_ICH8:
   3724 	case WM_T_ICH9:
   3725 	case WM_T_ICH10:
   3726 	case WM_T_PCH:
   3727 	case WM_T_PCH2:
   3728 	case WM_T_PCH_LPT:
   3729 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3730 		if (wm_check_reset_block(sc) == 0) {
   3731 			/*
   3732 			 * Gate automatic PHY configuration by hardware on
   3733 			 * non-managed 82579
   3734 			 */
   3735 			if ((sc->sc_type == WM_T_PCH2)
   3736 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3737 				== 0))
   3738 				wm_gate_hw_phy_config_ich8lan(sc, 1);
   3739 
   3740 			reg |= CTRL_PHY_RESET;
   3741 			phy_reset = 1;
   3742 		}
   3743 		wm_get_swfwhw_semaphore(sc);
   3744 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3745 		/* Don't insert a completion barrier during reset */
   3746 		delay(20*1000);
   3747 		wm_put_swfwhw_semaphore(sc);
   3748 		break;
   3749 	case WM_T_82580:
   3750 	case WM_T_I350:
   3751 	case WM_T_I354:
   3752 	case WM_T_I210:
   3753 	case WM_T_I211:
   3754 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3755 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3756 			CSR_WRITE_FLUSH(sc);
   3757 		delay(5000);
   3758 		break;
   3759 	case WM_T_82542_2_0:
   3760 	case WM_T_82542_2_1:
   3761 	case WM_T_82543:
   3762 	case WM_T_82540:
   3763 	case WM_T_82545:
   3764 	case WM_T_82546:
   3765 	case WM_T_82571:
   3766 	case WM_T_82572:
   3767 	case WM_T_82573:
   3768 	case WM_T_82574:
   3769 	case WM_T_82575:
   3770 	case WM_T_82576:
   3771 	case WM_T_82583:
   3772 	default:
   3773 		/* Everything else can safely use the documented method. */
   3774 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3775 		break;
   3776 	}
   3777 
   3778 	/* Must release the MDIO ownership after MAC reset */
   3779 	switch (sc->sc_type) {
   3780 	case WM_T_82573:
   3781 	case WM_T_82574:
   3782 	case WM_T_82583:
   3783 		if (error == 0)
   3784 			wm_put_hw_semaphore_82573(sc);
   3785 		break;
   3786 	default:
   3787 		break;
   3788 	}
   3789 
   3790 	if (phy_reset != 0)
   3791 		wm_get_cfg_done(sc);
   3792 
   3793 	/* reload EEPROM */
   3794 	switch (sc->sc_type) {
   3795 	case WM_T_82542_2_0:
   3796 	case WM_T_82542_2_1:
   3797 	case WM_T_82543:
   3798 	case WM_T_82544:
   3799 		delay(10);
   3800 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3801 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3802 		CSR_WRITE_FLUSH(sc);
   3803 		delay(2000);
   3804 		break;
   3805 	case WM_T_82540:
   3806 	case WM_T_82545:
   3807 	case WM_T_82545_3:
   3808 	case WM_T_82546:
   3809 	case WM_T_82546_3:
   3810 		delay(5*1000);
   3811 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3812 		break;
   3813 	case WM_T_82541:
   3814 	case WM_T_82541_2:
   3815 	case WM_T_82547:
   3816 	case WM_T_82547_2:
   3817 		delay(20000);
   3818 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3819 		break;
   3820 	case WM_T_82571:
   3821 	case WM_T_82572:
   3822 	case WM_T_82573:
   3823 	case WM_T_82574:
   3824 	case WM_T_82583:
   3825 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3826 			delay(10);
   3827 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3828 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3829 			CSR_WRITE_FLUSH(sc);
   3830 		}
   3831 		/* check EECD_EE_AUTORD */
   3832 		wm_get_auto_rd_done(sc);
   3833 		/*
   3834 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
    3835 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
   3836 		 */
   3837 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3838 		    || (sc->sc_type == WM_T_82583))
   3839 			delay(25*1000);
   3840 		break;
   3841 	case WM_T_82575:
   3842 	case WM_T_82576:
   3843 	case WM_T_82580:
   3844 	case WM_T_I350:
   3845 	case WM_T_I354:
   3846 	case WM_T_I210:
   3847 	case WM_T_I211:
   3848 	case WM_T_80003:
   3849 		/* check EECD_EE_AUTORD */
   3850 		wm_get_auto_rd_done(sc);
   3851 		break;
   3852 	case WM_T_ICH8:
   3853 	case WM_T_ICH9:
   3854 	case WM_T_ICH10:
   3855 	case WM_T_PCH:
   3856 	case WM_T_PCH2:
   3857 	case WM_T_PCH_LPT:
   3858 		break;
   3859 	default:
   3860 		panic("%s: unknown type\n", __func__);
   3861 	}
   3862 
   3863 	/* Check whether EEPROM is present or not */
   3864 	switch (sc->sc_type) {
   3865 	case WM_T_82575:
   3866 	case WM_T_82576:
   3867 	case WM_T_82580:
   3868 	case WM_T_I350:
   3869 	case WM_T_I354:
   3870 	case WM_T_ICH8:
   3871 	case WM_T_ICH9:
   3872 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3873 			/* Not found */
   3874 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3875 			if (sc->sc_type == WM_T_82575)
   3876 				wm_reset_init_script_82575(sc);
   3877 		}
   3878 		break;
   3879 	default:
   3880 		break;
   3881 	}
   3882 
   3883 	if ((sc->sc_type == WM_T_82580)
   3884 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3885 		/* clear global device reset status bit */
   3886 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3887 	}
   3888 
   3889 	/* Clear any pending interrupt events. */
   3890 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3891 	reg = CSR_READ(sc, WMREG_ICR);
   3892 	if (sc->sc_nintrs > 1) {
   3893 		if (sc->sc_type != WM_T_82574) {
   3894 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3895 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3896 		} else
   3897 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3898 	}
   3899 
   3900 	/* reload sc_ctrl */
   3901 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3902 
   3903 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   3904 		wm_set_eee_i350(sc);
   3905 
   3906 	/* dummy read from WUC */
   3907 	if (sc->sc_type == WM_T_PCH)
   3908 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3909 	/*
   3910 	 * For PCH, this write will make sure that any noise will be detected
   3911 	 * as a CRC error and be dropped rather than show up as a bad packet
   3912 	 * to the DMA engine
   3913 	 */
   3914 	if (sc->sc_type == WM_T_PCH)
   3915 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3916 
   3917 	if (sc->sc_type >= WM_T_82544)
   3918 		CSR_WRITE(sc, WMREG_WUC, 0);
   3919 
   3920 	wm_reset_mdicnfg_82580(sc);
   3921 
   3922 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   3923 		wm_pll_workaround_i210(sc);
   3924 }
   3925 
   3926 /*
   3927  * wm_add_rxbuf:
   3928  *
    3929  *	Add a receive buffer to the indicated descriptor.
   3930  */
   3931 static int
   3932 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   3933 {
   3934 	struct wm_softc *sc = rxq->rxq_sc;
   3935 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   3936 	struct mbuf *m;
   3937 	int error;
   3938 
   3939 	KASSERT(WM_RX_LOCKED(rxq));
   3940 
   3941 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3942 	if (m == NULL)
   3943 		return ENOBUFS;
   3944 
   3945 	MCLGET(m, M_DONTWAIT);
   3946 	if ((m->m_flags & M_EXT) == 0) {
   3947 		m_freem(m);
   3948 		return ENOBUFS;
   3949 	}
   3950 
   3951 	if (rxs->rxs_mbuf != NULL)
   3952 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3953 
   3954 	rxs->rxs_mbuf = m;
   3955 
   3956 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3957 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   3958 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   3959 	if (error) {
   3960 		/* XXX XXX XXX */
   3961 		aprint_error_dev(sc->sc_dev,
   3962 		    "unable to load rx DMA map %d, error = %d\n",
   3963 		    idx, error);
   3964 		panic("wm_add_rxbuf");
   3965 	}
   3966 
   3967 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   3968 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   3969 
   3970 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3971 		if ((sc->sc_rctl & RCTL_EN) != 0)
   3972 			wm_init_rxdesc(rxq, idx);
   3973 	} else
   3974 		wm_init_rxdesc(rxq, idx);
   3975 
   3976 	return 0;
   3977 }
   3978 
   3979 /*
   3980  * wm_rxdrain:
   3981  *
   3982  *	Drain the receive queue.
   3983  */
   3984 static void
   3985 wm_rxdrain(struct wm_rxqueue *rxq)
   3986 {
   3987 	struct wm_softc *sc = rxq->rxq_sc;
   3988 	struct wm_rxsoft *rxs;
   3989 	int i;
   3990 
   3991 	KASSERT(WM_RX_LOCKED(rxq));
   3992 
   3993 	for (i = 0; i < WM_NRXDESC; i++) {
   3994 		rxs = &rxq->rxq_soft[i];
   3995 		if (rxs->rxs_mbuf != NULL) {
   3996 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3997 			m_freem(rxs->rxs_mbuf);
   3998 			rxs->rxs_mbuf = NULL;
   3999 		}
   4000 	}
   4001 }
   4002 
   4003 
   4004 /*
   4005  * XXX copy from FreeBSD's sys/net/rss_config.c
   4006  */
   4007 /*
   4008  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4009  * effectiveness may be limited by algorithm choice and available entropy
   4010  * during the boot.
   4011  *
   4012  * XXXRW: And that we don't randomize it yet!
   4013  *
   4014  * This is the default Microsoft RSS specification key which is also
   4015  * the Chelsio T5 firmware default key.
   4016  */
   4017 #define RSS_KEYSIZE 40
   4018 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4019 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4020 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4021 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4022 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4023 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4024 };
   4025 
   4026 /*
   4027  * Caller must pass an array of size sizeof(rss_key).
   4028  *
   4029  * XXX
    4030  * As if_ixgbe may use this function, it should not be an
    4031  * if_wm-specific function.
   4032  */
   4033 static void
   4034 wm_rss_getkey(uint8_t *key)
   4035 {
   4036 
   4037 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4038 }
   4039 
   4040 /*
    4041  * Set up registers for RSS.
    4042  *
    4043  * XXX VMDq is not yet supported.
   4044  */
   4045 static void
   4046 wm_init_rss(struct wm_softc *sc)
   4047 {
   4048 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4049 	int i;
   4050 
   4051 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4052 
   4053 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4054 		int qid, reta_ent;
   4055 
   4056 		qid  = i % sc->sc_nrxqueues;
    4057 		switch (sc->sc_type) {
   4058 		case WM_T_82574:
   4059 			reta_ent = __SHIFTIN(qid,
   4060 			    RETA_ENT_QINDEX_MASK_82574);
   4061 			break;
   4062 		case WM_T_82575:
   4063 			reta_ent = __SHIFTIN(qid,
   4064 			    RETA_ENT_QINDEX1_MASK_82575);
   4065 			break;
   4066 		default:
   4067 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4068 			break;
   4069 		}
   4070 
   4071 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4072 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4073 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4074 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4075 	}
   4076 
   4077 	wm_rss_getkey((uint8_t *)rss_key);
   4078 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4079 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4080 
   4081 	if (sc->sc_type == WM_T_82574)
   4082 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4083 	else
   4084 		mrqc = MRQC_ENABLE_RSS_MQ;
   4085 
    4086 	/* XXX
    4087 	 * The same as FreeBSD's igb.
    4088 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4089 	 */
   4090 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4091 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4092 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4093 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4094 
   4095 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4096 }
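/*
 * Distribution sketch: with sc_nrxqueues = 4, the RETA loop above
 * programs the entries with the repeating queue pattern 0, 1, 2, 3,
 * 0, 1, ... so RSS hash values spread round-robin across the RX queues.
 */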
   4097 
   4098 /*
    4099  * Adjust the TX and RX queue numbers which the system actually uses.
    4100  *
    4101  * The numbers are affected by the parameters below:
    4102  *     - The number of hardware queues
   4103  *     - The number of MSI-X vectors (= "nvectors" argument)
   4104  *     - ncpu
   4105  */
   4106 static void
   4107 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4108 {
   4109 	int hw_ntxqueues, hw_nrxqueues;
   4110 
   4111 	if (nvectors < 3) {
   4112 		sc->sc_ntxqueues = 1;
   4113 		sc->sc_nrxqueues = 1;
   4114 		return;
   4115 	}
   4116 
    4117 	switch (sc->sc_type) {
   4118 	case WM_T_82572:
   4119 		hw_ntxqueues = 2;
   4120 		hw_nrxqueues = 2;
   4121 		break;
   4122 	case WM_T_82574:
   4123 		hw_ntxqueues = 2;
   4124 		hw_nrxqueues = 2;
   4125 		break;
   4126 	case WM_T_82575:
   4127 		hw_ntxqueues = 4;
   4128 		hw_nrxqueues = 4;
   4129 		break;
   4130 	case WM_T_82576:
   4131 		hw_ntxqueues = 16;
   4132 		hw_nrxqueues = 16;
   4133 		break;
   4134 	case WM_T_82580:
   4135 	case WM_T_I350:
   4136 	case WM_T_I354:
   4137 		hw_ntxqueues = 8;
   4138 		hw_nrxqueues = 8;
   4139 		break;
   4140 	case WM_T_I210:
   4141 		hw_ntxqueues = 4;
   4142 		hw_nrxqueues = 4;
   4143 		break;
   4144 	case WM_T_I211:
   4145 		hw_ntxqueues = 2;
   4146 		hw_nrxqueues = 2;
   4147 		break;
   4148 		/*
    4149 		 * As the Ethernet controllers below do not support MSI-X,
    4150 		 * this driver does not use multiqueue on them:
   4151 		 *     - WM_T_80003
   4152 		 *     - WM_T_ICH8
   4153 		 *     - WM_T_ICH9
   4154 		 *     - WM_T_ICH10
   4155 		 *     - WM_T_PCH
   4156 		 *     - WM_T_PCH2
   4157 		 *     - WM_T_PCH_LPT
   4158 		 */
   4159 	default:
   4160 		hw_ntxqueues = 1;
   4161 		hw_nrxqueues = 1;
   4162 		break;
   4163 	}
   4164 
   4165 	/*
    4166 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4167 	 * limit the number of queues actually used.
    4168 	 *
    4169 	 * XXX
    4170 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
    4171 	 * However, the MSI-X vector count of recent controllers (such as the
    4172 	 * I354) expects drivers to bundle a TX queue interrupt and an RX
    4173 	 * queue interrupt into one interrupt, e.g. FreeBSD's igb handles
    4174 	 * interrupts that way.
   4175 	 */
   4176 	if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
   4177 		sc->sc_ntxqueues = (nvectors - 1) / 2;
   4178 		sc->sc_nrxqueues = (nvectors - 1) / 2;
   4179 	} else {
   4180 		sc->sc_ntxqueues = hw_ntxqueues;
   4181 		sc->sc_nrxqueues = hw_nrxqueues;
   4182 	}
   4183 
   4184 	/*
    4185 	 * As more queues than CPUs cannot improve scaling, we limit
    4186 	 * the number of queues actually used.
   4187 	 */
   4188 	if (ncpu < sc->sc_ntxqueues)
   4189 		sc->sc_ntxqueues = ncpu;
   4190 	if (ncpu < sc->sc_nrxqueues)
   4191 		sc->sc_nrxqueues = ncpu;
   4192 
   4193 	/* XXX Currently, this driver supports RX multiqueue only. */
   4194 	sc->sc_ntxqueues = 1;
   4195 }
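/*
 * Worked example (hypothetical): an 82580 (8 TX/8 RX hardware queues)
 * with nvectors = 5 gets (5 - 1) / 2 = 2 TX and 2 RX queues, which are
 * then clamped to ncpu; finally sc_ntxqueues is forced back to 1
 * because only RX multiqueue is supported so far.
 */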
   4196 
   4197 /*
   4198  * Both single interrupt MSI and INTx can use this function.
   4199  */
   4200 static int
   4201 wm_setup_legacy(struct wm_softc *sc)
   4202 {
   4203 	pci_chipset_tag_t pc = sc->sc_pc;
   4204 	const char *intrstr = NULL;
   4205 	char intrbuf[PCI_INTRSTR_LEN];
   4206 	int error;
   4207 
   4208 	error = wm_alloc_txrx_queues(sc);
   4209 	if (error) {
   4210 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4211 		    error);
   4212 		return ENOMEM;
   4213 	}
   4214 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4215 	    sizeof(intrbuf));
   4216 #ifdef WM_MPSAFE
   4217 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4218 #endif
   4219 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4220 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4221 	if (sc->sc_ihs[0] == NULL) {
    4222 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4223 		    (pci_intr_type(sc->sc_intrs[0])
   4224 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4225 		return ENOMEM;
   4226 	}
   4227 
   4228 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4229 	sc->sc_nintrs = 1;
   4230 	return 0;
   4231 }
   4232 
   4233 static int
   4234 wm_setup_msix(struct wm_softc *sc)
   4235 {
   4236 	void *vih;
   4237 	kcpuset_t *affinity;
   4238 	int qidx, error, intr_idx, tx_established, rx_established;
   4239 	pci_chipset_tag_t pc = sc->sc_pc;
   4240 	const char *intrstr = NULL;
   4241 	char intrbuf[PCI_INTRSTR_LEN];
   4242 	char intr_xname[INTRDEVNAMEBUF];
   4243 	/*
    4244 	 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4245 	 * interrupts starts from CPU#1.
   4246 	 */
   4247 	int affinity_offset = 1;
   4248 
   4249 	error = wm_alloc_txrx_queues(sc);
   4250 	if (error) {
   4251 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4252 		    error);
   4253 		return ENOMEM;
   4254 	}
   4255 
   4256 	kcpuset_create(&affinity, false);
   4257 	intr_idx = 0;
   4258 
   4259 	/*
   4260 	 * TX
   4261 	 */
   4262 	tx_established = 0;
   4263 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4264 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4265 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
   4266 
   4267 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4268 		    sizeof(intrbuf));
   4269 #ifdef WM_MPSAFE
   4270 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4271 		    PCI_INTR_MPSAFE, true);
   4272 #endif
   4273 		memset(intr_xname, 0, sizeof(intr_xname));
   4274 		snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
   4275 		    device_xname(sc->sc_dev), qidx);
   4276 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4277 		    IPL_NET, wm_txintr_msix, txq, intr_xname);
   4278 		if (vih == NULL) {
   4279 			aprint_error_dev(sc->sc_dev,
   4280 			    "unable to establish MSI-X(for TX)%s%s\n",
   4281 			    intrstr ? " at " : "",
   4282 			    intrstr ? intrstr : "");
   4283 
   4284 			goto fail_0;
   4285 		}
   4286 		kcpuset_zero(affinity);
   4287 		/* Round-robin affinity */
   4288 		kcpuset_set(affinity, affinity_to);
   4289 		error = interrupt_distribute(vih, affinity, NULL);
   4290 		if (error == 0) {
   4291 			aprint_normal_dev(sc->sc_dev,
   4292 			    "for TX interrupting at %s affinity to %u\n",
   4293 			    intrstr, affinity_to);
   4294 		} else {
   4295 			aprint_normal_dev(sc->sc_dev,
   4296 			    "for TX interrupting at %s\n", intrstr);
   4297 		}
   4298 		sc->sc_ihs[intr_idx] = vih;
   4299 		txq->txq_id = qidx;
   4300 		txq->txq_intr_idx = intr_idx;
   4301 
   4302 		tx_established++;
   4303 		intr_idx++;
   4304 	}
   4305 
   4306 	/*
   4307 	 * RX
   4308 	 */
   4309 	rx_established = 0;
   4310 	for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4311 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4312 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
   4313 
   4314 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4315 		    sizeof(intrbuf));
   4316 #ifdef WM_MPSAFE
   4317 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4318 		    PCI_INTR_MPSAFE, true);
   4319 #endif
   4320 		memset(intr_xname, 0, sizeof(intr_xname));
   4321 		snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
   4322 		    device_xname(sc->sc_dev), qidx);
   4323 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4324 		    IPL_NET, wm_rxintr_msix, rxq, intr_xname);
   4325 		if (vih == NULL) {
   4326 			aprint_error_dev(sc->sc_dev,
   4327 			    "unable to establish MSI-X(for RX)%s%s\n",
   4328 			    intrstr ? " at " : "",
   4329 			    intrstr ? intrstr : "");
   4330 
   4331 			goto fail_1;
   4332 		}
   4333 		kcpuset_zero(affinity);
   4334 		/* Round-robin affinity */
   4335 		kcpuset_set(affinity, affinity_to);
   4336 		error = interrupt_distribute(vih, affinity, NULL);
   4337 		if (error == 0) {
   4338 			aprint_normal_dev(sc->sc_dev,
   4339 			    "for RX interrupting at %s affinity to %u\n",
   4340 			    intrstr, affinity_to);
   4341 		} else {
   4342 			aprint_normal_dev(sc->sc_dev,
   4343 			    "for RX interrupting at %s\n", intrstr);
   4344 		}
   4345 		sc->sc_ihs[intr_idx] = vih;
   4346 		rxq->rxq_id = qidx;
   4347 		rxq->rxq_intr_idx = intr_idx;
   4348 
   4349 		rx_established++;
   4350 		intr_idx++;
   4351 	}
   4352 
   4353 	/*
   4354 	 * LINK
   4355 	 */
   4356 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4357 	    sizeof(intrbuf));
   4358 #ifdef WM_MPSAFE
   4359 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4360 	    PCI_INTR_MPSAFE, true);
   4361 #endif
   4362 	memset(intr_xname, 0, sizeof(intr_xname));
   4363 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4364 	    device_xname(sc->sc_dev));
   4365 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4366 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4367 	if (vih == NULL) {
   4368 		aprint_error_dev(sc->sc_dev,
   4369 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4370 		    intrstr ? " at " : "",
   4371 		    intrstr ? intrstr : "");
   4372 
   4373 		goto fail_1;
   4374 	}
    4375 	/* Keep the default affinity for the LINK interrupt */
   4376 	aprint_normal_dev(sc->sc_dev,
   4377 	    "for LINK interrupting at %s\n", intrstr);
   4378 	sc->sc_ihs[intr_idx] = vih;
   4379 	sc->sc_link_intr_idx = intr_idx;
   4380 
   4381 	sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   4382 	kcpuset_destroy(affinity);
   4383 	return 0;
   4384 
   4385  fail_1:
   4386 	for (qidx = 0; qidx < rx_established; qidx++) {
   4387 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4388 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[rxq->rxq_intr_idx]);
   4389 		sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
   4390 	}
   4391  fail_0:
   4392 	for (qidx = 0; qidx < tx_established; qidx++) {
   4393 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4394 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[txq->txq_intr_idx]);
   4395 		sc->sc_ihs[txq->txq_intr_idx] = NULL;
   4396 	}
   4397 
   4398 	kcpuset_destroy(affinity);
   4399 	return ENOMEM;
   4400 }
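/*
 * Vector layout sketch (hypothetical configuration): with
 * sc_ntxqueues = 1 and sc_nrxqueues = 4, the loops above establish
 * vector 0 for TX0, vectors 1..4 for RX0..RX3 and vector 5 for LINK,
 * so sc_nintrs = 6.  Tx/Rx affinities are assigned round-robin
 * starting at CPU#1; the LINK vector keeps the default affinity.
 */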
   4401 
   4402 /*
   4403  * wm_init:		[ifnet interface function]
   4404  *
   4405  *	Initialize the interface.
   4406  */
   4407 static int
   4408 wm_init(struct ifnet *ifp)
   4409 {
   4410 	struct wm_softc *sc = ifp->if_softc;
   4411 	int ret;
   4412 
   4413 	WM_CORE_LOCK(sc);
   4414 	ret = wm_init_locked(ifp);
   4415 	WM_CORE_UNLOCK(sc);
   4416 
   4417 	return ret;
   4418 }
   4419 
   4420 static int
   4421 wm_init_locked(struct ifnet *ifp)
   4422 {
   4423 	struct wm_softc *sc = ifp->if_softc;
   4424 	int i, j, trynum, error = 0;
   4425 	uint32_t reg;
   4426 
   4427 	KASSERT(WM_CORE_LOCKED(sc));
   4428 	/*
    4429 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4430 	 * There is a small but measurable benefit to avoiding the adjustment
   4431 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4432 	 * on such platforms.  One possibility is that the DMA itself is
   4433 	 * slightly more efficient if the front of the entire packet (instead
   4434 	 * of the front of the headers) is aligned.
   4435 	 *
   4436 	 * Note we must always set align_tweak to 0 if we are using
   4437 	 * jumbo frames.
   4438 	 */
   4439 #ifdef __NO_STRICT_ALIGNMENT
   4440 	sc->sc_align_tweak = 0;
   4441 #else
   4442 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4443 		sc->sc_align_tweak = 0;
   4444 	else
   4445 		sc->sc_align_tweak = 2;
   4446 #endif /* __NO_STRICT_ALIGNMENT */
   4447 
   4448 	/* Cancel any pending I/O. */
   4449 	wm_stop_locked(ifp, 0);
   4450 
   4451 	/* update statistics before reset */
   4452 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4453 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4454 
   4455 	/* Reset the chip to a known state. */
   4456 	wm_reset(sc);
   4457 
   4458 	switch (sc->sc_type) {
   4459 	case WM_T_82571:
   4460 	case WM_T_82572:
   4461 	case WM_T_82573:
   4462 	case WM_T_82574:
   4463 	case WM_T_82583:
   4464 	case WM_T_80003:
   4465 	case WM_T_ICH8:
   4466 	case WM_T_ICH9:
   4467 	case WM_T_ICH10:
   4468 	case WM_T_PCH:
   4469 	case WM_T_PCH2:
   4470 	case WM_T_PCH_LPT:
   4471 		/* AMT based hardware can now take control from firmware */
   4472 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4473 			wm_get_hw_control(sc);
   4474 		break;
   4475 	default:
   4476 		break;
   4477 	}
   4478 
   4479 	/* Init hardware bits */
   4480 	wm_initialize_hardware_bits(sc);
   4481 
   4482 	/* Reset the PHY. */
   4483 	if (sc->sc_flags & WM_F_HAS_MII)
   4484 		wm_gmii_reset(sc);
   4485 
   4486 	/* Calculate (E)ITR value */
   4487 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4488 		sc->sc_itr = 450;	/* For EITR */
   4489 	} else if (sc->sc_type >= WM_T_82543) {
   4490 		/*
   4491 		 * Set up the interrupt throttling register (units of 256ns)
   4492 		 * Note that a footnote in Intel's documentation says this
   4493 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4494 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4495 		 * that that is also true for the 1024ns units of the other
   4496 		 * interrupt-related timer registers -- so, really, we ought
   4497 		 * to divide this value by 4 when the link speed is low.
   4498 		 *
   4499 		 * XXX implement this division at link speed change!
   4500 		 */
   4501 
   4502 		/*
   4503 		 * For N interrupts/sec, set this value to:
   4504 		 * 1000000000 / (N * 256).  Note that we set the
   4505 		 * absolute and packet timer values to this value
   4506 		 * divided by 4 to get "simple timer" behavior.
   4507 		 */
   4508 
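		/*
		 * For example, sc_itr = 1500 gives an interrupt period of
		 * 1500 * 256ns = 384us, i.e. 10^9 / (1500 * 256) = ~2604
		 * interrupts/sec.
		 */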
   4509 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4510 	}
   4511 
   4512 	error = wm_init_txrx_queues(sc);
   4513 	if (error)
   4514 		goto out;
   4515 
   4516 	/*
   4517 	 * Clear out the VLAN table -- we don't use it (yet).
   4518 	 */
   4519 	CSR_WRITE(sc, WMREG_VET, 0);
   4520 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4521 		trynum = 10; /* Due to hw errata */
   4522 	else
   4523 		trynum = 1;
   4524 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4525 		for (j = 0; j < trynum; j++)
   4526 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4527 
   4528 	/*
   4529 	 * Set up flow-control parameters.
   4530 	 *
   4531 	 * XXX Values could probably stand some tuning.
   4532 	 */
   4533 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4534 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4535 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   4536 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4537 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4538 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4539 	}
   4540 
   4541 	sc->sc_fcrtl = FCRTL_DFLT;
   4542 	if (sc->sc_type < WM_T_82543) {
   4543 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4544 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4545 	} else {
   4546 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4547 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4548 	}
   4549 
   4550 	if (sc->sc_type == WM_T_80003)
   4551 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4552 	else
   4553 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4554 
   4555 	/* Writes the control register. */
   4556 	wm_set_vlan(sc);
   4557 
   4558 	if (sc->sc_flags & WM_F_HAS_MII) {
   4559 		int val;
   4560 
   4561 		switch (sc->sc_type) {
   4562 		case WM_T_80003:
   4563 		case WM_T_ICH8:
   4564 		case WM_T_ICH9:
   4565 		case WM_T_ICH10:
   4566 		case WM_T_PCH:
   4567 		case WM_T_PCH2:
   4568 		case WM_T_PCH_LPT:
   4569 			/*
    4570 			 * Set the MAC to wait the maximum time between each
   4571 			 * iteration and increase the max iterations when
   4572 			 * polling the phy; this fixes erroneous timeouts at
   4573 			 * 10Mbps.
   4574 			 */
   4575 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4576 			    0xFFFF);
   4577 			val = wm_kmrn_readreg(sc,
   4578 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   4579 			val |= 0x3F;
   4580 			wm_kmrn_writereg(sc,
   4581 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4582 			break;
   4583 		default:
   4584 			break;
   4585 		}
   4586 
   4587 		if (sc->sc_type == WM_T_80003) {
   4588 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4589 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4590 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4591 
   4592 			/* Bypass RX and TX FIFO's */
   4593 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4594 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4595 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4596 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4597 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4598 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4599 		}
   4600 	}
   4601 #if 0
   4602 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4603 #endif
   4604 
   4605 	/* Set up checksum offload parameters. */
   4606 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4607 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4608 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4609 		reg |= RXCSUM_IPOFL;
   4610 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4611 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4612 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4613 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4614 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4615 
   4616 	/* Set up MSI-X */
   4617 	if (sc->sc_nintrs > 1) {
   4618 		uint32_t ivar;
   4619 
   4620 		if (sc->sc_type == WM_T_82575) {
   4621 			/* Interrupt control */
   4622 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4623 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4624 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4625 
   4626 			/* TX */
   4627 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4628 				struct wm_txqueue *txq = &sc->sc_txq[i];
   4629 				CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
   4630 				    EITR_TX_QUEUE(txq->txq_id));
   4631 			}
   4632 			/* RX */
   4633 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4634 				struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4635 				CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
   4636 				    EITR_RX_QUEUE(rxq->rxq_id));
   4637 			}
   4638 			/* Link status */
   4639 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4640 			    EITR_OTHER);
   4641 		} else if (sc->sc_type == WM_T_82574) {
   4642 			/* Interrupt control */
   4643 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4644 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4645 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4646 
   4647 			ivar = 0;
   4648 			/* TX */
   4649 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4650 				struct wm_txqueue *txq = &sc->sc_txq[i];
   4651 				ivar |= __SHIFTIN((IVAR_VALID_82574|txq->txq_intr_idx),
   4652 				    IVAR_TX_MASK_Q_82574(txq->txq_id));
   4653 			}
   4654 			/* RX */
   4655 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4656 				struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4657 				ivar |= __SHIFTIN((IVAR_VALID_82574|rxq->rxq_intr_idx),
   4658 				    IVAR_RX_MASK_Q_82574(rxq->rxq_id));
   4659 			}
   4660 			/* Link status */
   4661 			ivar |= __SHIFTIN((IVAR_VALID_82574|sc->sc_link_intr_idx),
   4662 			    IVAR_OTHER_MASK);
   4663 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4664 		} else {
   4665 			/* Interrupt control */
   4666 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
   4667 			    | GPIE_MULTI_MSIX | GPIE_EIAME
   4668 			    | GPIE_PBA);
   4669 
   4670 			switch (sc->sc_type) {
   4671 			case WM_T_82580:
   4672 			case WM_T_I350:
   4673 			case WM_T_I354:
   4674 			case WM_T_I210:
   4675 			case WM_T_I211:
   4676 				/* TX */
   4677 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4678 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4679 					int qid = txq->txq_id;
   4680 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4681 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4682 					ivar |= __SHIFTIN(
   4683 						(txq->txq_intr_idx | IVAR_VALID),
   4684 						IVAR_TX_MASK_Q(qid));
   4685 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4686 				}
   4687 
   4688 				/* RX */
   4689 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4690 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4691 					int qid = rxq->rxq_id;
   4692 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4693 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4694 					ivar |= __SHIFTIN(
   4695 						(rxq->rxq_intr_idx | IVAR_VALID),
   4696 						IVAR_RX_MASK_Q(qid));
   4697 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4698 				}
   4699 				break;
   4700 			case WM_T_82576:
   4701 				/* TX */
   4702 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4703 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4704 					int qid = txq->txq_id;
   4705 					ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
   4706 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4707 					ivar |= __SHIFTIN(
   4708 						(txq->txq_intr_idx | IVAR_VALID),
   4709 						IVAR_TX_MASK_Q_82576(qid));
   4710 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
   4711 				}
   4712 
   4713 				/* RX */
   4714 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4715 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4716 					int qid = rxq->rxq_id;
   4717 					ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
   4718 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4719 					ivar |= __SHIFTIN(
   4720 						(rxq->rxq_intr_idx | IVAR_VALID),
   4721 						IVAR_RX_MASK_Q_82576(qid));
   4722 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
   4723 				}
   4724 				break;
   4725 			default:
   4726 				break;
   4727 			}
   4728 
   4729 			/* Link status */
   4730 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4731 			    IVAR_MISC_OTHER);
   4732 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4733 		}
   4734 
   4735 		if (sc->sc_nrxqueues > 1) {
   4736 			wm_init_rss(sc);
   4737 
			/*
			 * NOTE: Receive Full-Packet Checksum Offload
			 * is mutually exclusive with Multiqueue. However,
			 * this is not the same as the TCP/IP checksums,
			 * which still work.
			 */
   4744 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4745 			reg |= RXCSUM_PCSD;
   4746 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4747 		}
   4748 	}
   4749 
   4750 	/* Set up the interrupt registers. */
   4751 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4752 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4753 	    ICR_RXO | ICR_RXT0;
   4754 	if (sc->sc_nintrs > 1) {
   4755 		uint32_t mask;
   4756 		switch (sc->sc_type) {
   4757 		case WM_T_82574:
   4758 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4759 			    WMREG_EIAC_82574_MSIX_MASK);
   4760 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4761 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4762 			break;
   4763 		default:
   4764 			if (sc->sc_type == WM_T_82575) {
   4765 				mask = 0;
   4766 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4767 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4768 					mask |= EITR_TX_QUEUE(txq->txq_id);
   4769 				}
   4770 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4771 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4772 					mask |= EITR_RX_QUEUE(rxq->rxq_id);
   4773 				}
   4774 				mask |= EITR_OTHER;
   4775 			} else {
   4776 				mask = 0;
   4777 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4778 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4779 					mask |= 1 << txq->txq_intr_idx;
   4780 				}
   4781 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4782 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4783 					mask |= 1 << rxq->rxq_intr_idx;
   4784 				}
   4785 				mask |= 1 << sc->sc_link_intr_idx;
   4786 			}
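			/*
			 * Program auto-clear (EIAC), auto-mask (EIAM) and
			 * the enable mask (EIMS) for the vectors gathered
			 * above; link state changes are still enabled
			 * through the legacy IMS mask.
			 */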
   4787 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4788 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4789 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4790 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4791 			break;
   4792 		}
   4793 	} else
   4794 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4795 
   4796 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4797 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4798 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   4799 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4800 		reg |= KABGTXD_BGSQLBIAS;
   4801 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4802 	}
   4803 
   4804 	/* Set up the inter-packet gap. */
   4805 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4806 
   4807 	if (sc->sc_type >= WM_T_82543) {
		/*
		 * XXX The 82574 has both ITR and EITR. Set EITR when we
		 * use the multiqueue function with MSI-X.
		 */
   4812 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4813 			int qidx;
   4814 			for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4815 				struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4816 				CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
   4817 				    sc->sc_itr);
   4818 			}
   4819 			for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4820 				struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4821 				CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
   4822 				    sc->sc_itr);
   4823 			}
			/*
			 * Link interrupts occur much less frequently than
			 * TX and RX interrupts, so we don't tune the
			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
			 * FreeBSD's if_igb does.
			 */
   4830 		} else
   4831 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4832 	}
   4833 
   4834 	/* Set the VLAN ethernetype. */
   4835 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4836 
   4837 	/*
   4838 	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
   4840 	 * we resolve the media type.
   4841 	 */
   4842 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4843 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4844 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4845 	if (sc->sc_type >= WM_T_82571)
   4846 		sc->sc_tctl |= TCTL_MULR;
   4847 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4848 
   4849 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set. See the datasheet. */
   4851 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4852 	}
   4853 
   4854 	if (sc->sc_type == WM_T_80003) {
   4855 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4856 		reg &= ~TCTL_EXT_GCEX_MASK;
   4857 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4858 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4859 	}
   4860 
   4861 	/* Set the media. */
   4862 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4863 		goto out;
   4864 
   4865 	/* Configure for OS presence */
   4866 	wm_init_manageability(sc);
   4867 
   4868 	/*
   4869 	 * Set up the receive control register; we actually program
   4870 	 * the register when we set the receive filter.  Use multicast
   4871 	 * address offset type 0.
   4872 	 *
   4873 	 * Only the i82544 has the ability to strip the incoming
   4874 	 * CRC, so we don't enable that feature.
   4875 	 */
   4876 	sc->sc_mchash_type = 0;
   4877 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4878 	    | RCTL_MO(sc->sc_mchash_type);
   4879 
   4880 	/*
   4881 	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   4883 	 */
   4884 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4885 	    || (sc->sc_type == WM_T_I210))
   4886 		sc->sc_rctl |= RCTL_SECRC;
   4887 
   4888 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4889 	    && (ifp->if_mtu > ETHERMTU)) {
   4890 		sc->sc_rctl |= RCTL_LPE;
   4891 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4892 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4893 	}
   4894 
   4895 	if (MCLBYTES == 2048) {
   4896 		sc->sc_rctl |= RCTL_2k;
   4897 	} else {
   4898 		if (sc->sc_type >= WM_T_82543) {
   4899 			switch (MCLBYTES) {
   4900 			case 4096:
   4901 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4902 				break;
   4903 			case 8192:
   4904 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4905 				break;
   4906 			case 16384:
   4907 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4908 				break;
   4909 			default:
   4910 				panic("wm_init: MCLBYTES %d unsupported",
   4911 				    MCLBYTES);
   4912 				break;
   4913 			}
		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
   4915 	}
   4916 
   4917 	/* Set the receive filter. */
   4918 	wm_set_filter(sc);
   4919 
   4920 	/* Enable ECC */
   4921 	switch (sc->sc_type) {
   4922 	case WM_T_82571:
   4923 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4924 		reg |= PBA_ECC_CORR_EN;
   4925 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4926 		break;
   4927 	case WM_T_PCH_LPT:
   4928 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4929 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4930 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4931 
   4932 		reg = CSR_READ(sc, WMREG_CTRL);
   4933 		reg |= CTRL_MEHE;
   4934 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4935 		break;
   4936 	default:
   4937 		break;
   4938 	}
   4939 
	/* On 82575 and later, set RDT only once the receiver is enabled */
   4941 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4942 		int qidx;
   4943 		for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4944 			struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4945 			for (i = 0; i < WM_NRXDESC; i++) {
   4946 				WM_RX_LOCK(rxq);
   4947 				wm_init_rxdesc(rxq, i);
				WM_RX_UNLOCK(rxq);
			}
   4951 		}
   4952 	}
   4953 
   4954 	sc->sc_stopping = false;
   4955 
   4956 	/* Start the one second link check clock. */
   4957 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4958 
   4959 	/* ...all done! */
   4960 	ifp->if_flags |= IFF_RUNNING;
   4961 	ifp->if_flags &= ~IFF_OACTIVE;
   4962 
   4963  out:
   4964 	sc->sc_if_flags = ifp->if_flags;
   4965 	if (error)
   4966 		log(LOG_ERR, "%s: interface not running\n",
   4967 		    device_xname(sc->sc_dev));
   4968 	return error;
   4969 }
   4970 
   4971 /*
   4972  * wm_stop:		[ifnet interface function]
   4973  *
   4974  *	Stop transmission on the interface.
   4975  */
   4976 static void
   4977 wm_stop(struct ifnet *ifp, int disable)
   4978 {
   4979 	struct wm_softc *sc = ifp->if_softc;
   4980 
   4981 	WM_CORE_LOCK(sc);
   4982 	wm_stop_locked(ifp, disable);
   4983 	WM_CORE_UNLOCK(sc);
   4984 }
   4985 
   4986 static void
   4987 wm_stop_locked(struct ifnet *ifp, int disable)
   4988 {
   4989 	struct wm_softc *sc = ifp->if_softc;
   4990 	struct wm_txsoft *txs;
   4991 	int i, qidx;
   4992 
   4993 	KASSERT(WM_CORE_LOCKED(sc));
   4994 
   4995 	sc->sc_stopping = true;
   4996 
   4997 	/* Stop the one second clock. */
   4998 	callout_stop(&sc->sc_tick_ch);
   4999 
   5000 	/* Stop the 82547 Tx FIFO stall check timer. */
   5001 	if (sc->sc_type == WM_T_82547)
   5002 		callout_stop(&sc->sc_txfifo_ch);
   5003 
   5004 	if (sc->sc_flags & WM_F_HAS_MII) {
   5005 		/* Down the MII. */
   5006 		mii_down(&sc->sc_mii);
   5007 	} else {
   5008 #if 0
   5009 		/* Should we clear PHY's status properly? */
   5010 		wm_reset(sc);
   5011 #endif
   5012 	}
   5013 
   5014 	/* Stop the transmit and receive processes. */
   5015 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5016 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5017 	sc->sc_rctl &= ~RCTL_EN;
   5018 
   5019 	/*
   5020 	 * Clear the interrupt mask to ensure the device cannot assert its
   5021 	 * interrupt line.
   5022 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5023 	 * service any currently pending or shared interrupt.
   5024 	 */
   5025 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5026 	sc->sc_icr = 0;
   5027 	if (sc->sc_nintrs > 1) {
   5028 		if (sc->sc_type != WM_T_82574) {
   5029 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5030 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5031 		} else
   5032 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5033 	}
   5034 
   5035 	/* Release any queued transmit buffers. */
   5036 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   5037 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   5038 		WM_TX_LOCK(txq);
   5039 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5040 			txs = &txq->txq_soft[i];
   5041 			if (txs->txs_mbuf != NULL) {
   5042 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5043 				m_freem(txs->txs_mbuf);
   5044 				txs->txs_mbuf = NULL;
   5045 			}
   5046 		}
   5047 		WM_TX_UNLOCK(txq);
   5048 	}
   5049 
   5050 	/* Mark the interface as down and cancel the watchdog timer. */
   5051 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5052 	ifp->if_timer = 0;
   5053 
   5054 	if (disable) {
   5055 		for (i = 0; i < sc->sc_nrxqueues; i++) {
   5056 			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5057 			WM_RX_LOCK(rxq);
   5058 			wm_rxdrain(rxq);
   5059 			WM_RX_UNLOCK(rxq);
   5060 		}
   5061 	}
   5062 
   5063 #if 0 /* notyet */
   5064 	if (sc->sc_type >= WM_T_82544)
   5065 		CSR_WRITE(sc, WMREG_WUC, 0);
   5066 #endif
   5067 }
   5068 
   5069 static void
   5070 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5071 {
   5072 	struct mbuf *m;
   5073 	int i;
   5074 
   5075 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5076 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5077 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5078 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5079 		    m->m_data, m->m_len, m->m_flags);
   5080 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5081 	    i, i == 1 ? "" : "s");
   5082 }
   5083 
   5084 /*
   5085  * wm_82547_txfifo_stall:
   5086  *
   5087  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5088  *	reset the FIFO pointers, and restart packet transmission.
   5089  */
   5090 static void
   5091 wm_82547_txfifo_stall(void *arg)
   5092 {
   5093 	struct wm_softc *sc = arg;
   5094 	struct wm_txqueue *txq = sc->sc_txq;
   5095 #ifndef WM_MPSAFE
   5096 	int s;
   5097 
   5098 	s = splnet();
   5099 #endif
   5100 	WM_TX_LOCK(txq);
   5101 
   5102 	if (sc->sc_stopping)
   5103 		goto out;
   5104 
   5105 	if (txq->txq_fifo_stall) {
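		/*
		 * The FIFO has drained when the descriptor ring is empty
		 * (TDT == TDH) and the FIFO head/tail pointer pairs match.
		 */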
   5106 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5107 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5108 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5109 			/*
   5110 			 * Packets have drained.  Stop transmitter, reset
   5111 			 * FIFO pointers, restart transmitter, and kick
   5112 			 * the packet queue.
   5113 			 */
   5114 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5115 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5116 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5117 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5118 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5119 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5120 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5121 			CSR_WRITE_FLUSH(sc);
   5122 
   5123 			txq->txq_fifo_head = 0;
   5124 			txq->txq_fifo_stall = 0;
   5125 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5126 		} else {
   5127 			/*
   5128 			 * Still waiting for packets to drain; try again in
   5129 			 * another tick.
   5130 			 */
   5131 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5132 		}
   5133 	}
   5134 
   5135 out:
   5136 	WM_TX_UNLOCK(txq);
   5137 #ifndef WM_MPSAFE
   5138 	splx(s);
   5139 #endif
   5140 }
   5141 
   5142 /*
   5143  * wm_82547_txfifo_bugchk:
   5144  *
   5145  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5146  *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   5148  *
   5149  *	We do this by checking the amount of space before the end
   5150  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5151  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5152  *	the internal FIFO pointers to the beginning, and restart
   5153  *	transmission on the interface.
   5154  */
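/*
 * Each packet consumes its length plus a FIFO header, rounded up to
 * a WM_FIFO_HDR boundary, of Tx FIFO space.
 */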
   5155 #define	WM_FIFO_HDR		0x10
   5156 #define	WM_82547_PAD_LEN	0x3e0
   5157 static int
   5158 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5159 {
   5160 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5161 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5162 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5163 
   5164 	/* Just return if already stalled. */
   5165 	if (txq->txq_fifo_stall)
   5166 		return 1;
   5167 
   5168 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5169 		/* Stall only occurs in half-duplex mode. */
   5170 		goto send_packet;
   5171 	}
   5172 
   5173 	if (len >= WM_82547_PAD_LEN + space) {
   5174 		txq->txq_fifo_stall = 1;
   5175 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5176 		return 1;
   5177 	}
   5178 
   5179  send_packet:
   5180 	txq->txq_fifo_head += len;
   5181 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5182 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5183 
   5184 	return 0;
   5185 }
   5186 
   5187 static int
   5188 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5189 {
   5190 	int error;
   5191 
   5192 	/*
   5193 	 * Allocate the control data structures, and create and load the
   5194 	 * DMA map for it.
   5195 	 *
   5196 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5197 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5198 	 * both sets within the same 4G segment.
   5199 	 */
   5200 	if (sc->sc_type < WM_T_82544) {
   5201 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5202 		txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
   5203 	} else {
   5204 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5205 		txq->txq_desc_size = sizeof(txdescs_t);
   5206 	}
   5207 
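	/*
	 * The 0x100000000ULL boundary passed to bus_dmamem_alloc()
	 * enforces the single-4G-segment constraint noted above.
	 */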
   5208 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size, PAGE_SIZE,
   5209 		    (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 1,
   5210 		    &txq->txq_desc_rseg, 0)) != 0) {
   5211 		aprint_error_dev(sc->sc_dev,
   5212 		    "unable to allocate TX control data, error = %d\n",
   5213 		    error);
   5214 		goto fail_0;
   5215 	}
   5216 
   5217 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5218 		    txq->txq_desc_rseg, txq->txq_desc_size,
   5219 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5220 		aprint_error_dev(sc->sc_dev,
   5221 		    "unable to map TX control data, error = %d\n", error);
   5222 		goto fail_1;
   5223 	}
   5224 
   5225 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
   5226 		    txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5227 		aprint_error_dev(sc->sc_dev,
   5228 		    "unable to create TX control data DMA map, error = %d\n",
   5229 		    error);
   5230 		goto fail_2;
   5231 	}
   5232 
   5233 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5234 		    txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
   5235 		aprint_error_dev(sc->sc_dev,
   5236 		    "unable to load TX control data DMA map, error = %d\n",
   5237 		    error);
   5238 		goto fail_3;
   5239 	}
   5240 
   5241 	return 0;
   5242 
   5243  fail_3:
   5244 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5245  fail_2:
   5246 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5247 	    txq->txq_desc_size);
   5248  fail_1:
   5249 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5250  fail_0:
   5251 	return error;
   5252 }
   5253 
   5254 static void
   5255 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5256 {
   5257 
   5258 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5259 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5260 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5261 	    txq->txq_desc_size);
   5262 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5263 }
   5264 
   5265 static int
   5266 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5267 {
   5268 	int error;
   5269 
   5270 	/*
   5271 	 * Allocate the control data structures, and create and load the
   5272 	 * DMA map for it.
   5273 	 *
   5274 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5275 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5276 	 * both sets within the same 4G segment.
   5277 	 */
   5278 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5279 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE,
   5280 		    (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1,
   5281 		    &rxq->rxq_desc_rseg, 0)) != 0) {
   5282 		aprint_error_dev(sc->sc_dev,
   5283 		    "unable to allocate RX control data, error = %d\n",
   5284 		    error);
   5285 		goto fail_0;
   5286 	}
   5287 
   5288 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5289 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5290 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5291 		aprint_error_dev(sc->sc_dev,
   5292 		    "unable to map RX control data, error = %d\n", error);
   5293 		goto fail_1;
   5294 	}
   5295 
   5296 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5297 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5298 		aprint_error_dev(sc->sc_dev,
   5299 		    "unable to create RX control data DMA map, error = %d\n",
   5300 		    error);
   5301 		goto fail_2;
   5302 	}
   5303 
   5304 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5305 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5306 		aprint_error_dev(sc->sc_dev,
   5307 		    "unable to load RX control data DMA map, error = %d\n",
   5308 		    error);
   5309 		goto fail_3;
   5310 	}
   5311 
   5312 	return 0;
   5313 
   5314  fail_3:
   5315 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5316  fail_2:
   5317 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5318 	    rxq->rxq_desc_size);
   5319  fail_1:
   5320 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5321  fail_0:
   5322 	return error;
   5323 }
   5324 
   5325 static void
   5326 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5327 {
   5328 
   5329 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5330 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5331 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5332 	    rxq->rxq_desc_size);
   5333 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5334 }
   5335 
   5336 
   5337 static int
   5338 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5339 {
   5340 	int i, error;
   5341 
   5342 	/* Create the transmit buffer DMA maps. */
   5343 	WM_TXQUEUELEN(txq) =
   5344 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5345 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5346 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5347 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5348 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5349 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5350 			aprint_error_dev(sc->sc_dev,
   5351 			    "unable to create Tx DMA map %d, error = %d\n",
   5352 			    i, error);
   5353 			goto fail;
   5354 		}
   5355 	}
   5356 
   5357 	return 0;
   5358 
   5359  fail:
   5360 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5361 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5362 			bus_dmamap_destroy(sc->sc_dmat,
   5363 			    txq->txq_soft[i].txs_dmamap);
   5364 	}
   5365 	return error;
   5366 }
   5367 
   5368 static void
   5369 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5370 {
   5371 	int i;
   5372 
   5373 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5374 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5375 			bus_dmamap_destroy(sc->sc_dmat,
   5376 			    txq->txq_soft[i].txs_dmamap);
   5377 	}
   5378 }
   5379 
   5380 static int
   5381 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5382 {
   5383 	int i, error;
   5384 
   5385 	/* Create the receive buffer DMA maps. */
   5386 	for (i = 0; i < WM_NRXDESC; i++) {
   5387 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5388 			    MCLBYTES, 0, 0,
   5389 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5390 			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d, error = %d\n",
   5392 			    i, error);
   5393 			goto fail;
   5394 		}
   5395 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5396 	}
   5397 
   5398 	return 0;
   5399 
   5400  fail:
   5401 	for (i = 0; i < WM_NRXDESC; i++) {
   5402 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5403 			bus_dmamap_destroy(sc->sc_dmat,
   5404 			    rxq->rxq_soft[i].rxs_dmamap);
   5405 	}
   5406 	return error;
   5407 }
   5408 
   5409 static void
   5410 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5411 {
   5412 	int i;
   5413 
   5414 	for (i = 0; i < WM_NRXDESC; i++) {
   5415 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5416 			bus_dmamap_destroy(sc->sc_dmat,
   5417 			    rxq->rxq_soft[i].rxs_dmamap);
   5418 	}
   5419 }
   5420 
   5421 /*
 * wm_alloc_txrx_queues:
   5423  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5424  */
   5425 static int
   5426 wm_alloc_txrx_queues(struct wm_softc *sc)
   5427 {
   5428 	int i, error, tx_done, rx_done;
   5429 
   5430 	/*
   5431 	 * For transmission
   5432 	 */
   5433 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
   5434 	    KM_SLEEP);
   5435 	if (sc->sc_txq == NULL) {
   5436 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
   5437 		error = ENOMEM;
   5438 		goto fail_0;
   5439 	}
   5440 
   5441 	error = 0;
   5442 	tx_done = 0;
   5443 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5444 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5445 		txq->txq_sc = sc;
   5446 #ifdef WM_MPSAFE
   5447 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5448 #else
   5449 		txq->txq_lock = NULL;
   5450 #endif
   5451 		error = wm_alloc_tx_descs(sc, txq);
   5452 		if (error)
   5453 			break;
   5454 		error = wm_alloc_tx_buffer(sc, txq);
   5455 		if (error) {
   5456 			wm_free_tx_descs(sc, txq);
   5457 			break;
   5458 		}
   5459 		tx_done++;
   5460 	}
   5461 	if (error)
   5462 		goto fail_1;
   5463 
   5464 	/*
	 * For receive
   5466 	 */
   5467 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
   5468 	    KM_SLEEP);
   5469 	if (sc->sc_rxq == NULL) {
   5470 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
   5471 		error = ENOMEM;
   5472 		goto fail_1;
   5473 	}
   5474 
   5475 	error = 0;
   5476 	rx_done = 0;
   5477 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5478 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5479 		rxq->rxq_sc = sc;
   5480 #ifdef WM_MPSAFE
   5481 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5482 #else
   5483 		rxq->rxq_lock = NULL;
   5484 #endif
   5485 		error = wm_alloc_rx_descs(sc, rxq);
   5486 		if (error)
   5487 			break;
   5488 
   5489 		error = wm_alloc_rx_buffer(sc, rxq);
   5490 		if (error) {
   5491 			wm_free_rx_descs(sc, rxq);
   5492 			break;
   5493 		}
   5494 
   5495 		rx_done++;
   5496 	}
   5497 	if (error)
   5498 		goto fail_2;
   5499 
   5500 	return 0;
   5501 
   5502  fail_2:
   5503 	for (i = 0; i < rx_done; i++) {
   5504 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5505 		wm_free_rx_buffer(sc, rxq);
   5506 		wm_free_rx_descs(sc, rxq);
   5507 		if (rxq->rxq_lock)
   5508 			mutex_obj_free(rxq->rxq_lock);
   5509 	}
   5510 	kmem_free(sc->sc_rxq,
   5511 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5512  fail_1:
   5513 	for (i = 0; i < tx_done; i++) {
   5514 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5515 		wm_free_tx_buffer(sc, txq);
   5516 		wm_free_tx_descs(sc, txq);
   5517 		if (txq->txq_lock)
   5518 			mutex_obj_free(txq->txq_lock);
   5519 	}
   5520 	kmem_free(sc->sc_txq,
   5521 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5522  fail_0:
   5523 	return error;
   5524 }
   5525 
   5526 /*
 * wm_free_txrx_queues:
   5528  *	Free {tx,rx}descs and {tx,rx} buffers
   5529  */
   5530 static void
   5531 wm_free_txrx_queues(struct wm_softc *sc)
   5532 {
   5533 	int i;
   5534 
   5535 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5536 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5537 		wm_free_rx_buffer(sc, rxq);
   5538 		wm_free_rx_descs(sc, rxq);
   5539 		if (rxq->rxq_lock)
   5540 			mutex_obj_free(rxq->rxq_lock);
   5541 	}
   5542 	kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5543 
   5544 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5545 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5546 		wm_free_tx_buffer(sc, txq);
   5547 		wm_free_tx_descs(sc, txq);
   5548 		if (txq->txq_lock)
   5549 			mutex_obj_free(txq->txq_lock);
   5550 	}
   5551 	kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5552 }
   5553 
   5554 static void
   5555 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5556 {
   5557 
   5558 	KASSERT(WM_TX_LOCKED(txq));
   5559 
   5560 	/* Initialize the transmit descriptor ring. */
   5561 	memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
   5562 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5563 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5564 	txq->txq_free = WM_NTXDESC(txq);
   5565 	txq->txq_next = 0;
   5566 }
   5567 
   5568 static void
   5569 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
   5570 {
   5571 
   5572 	KASSERT(WM_TX_LOCKED(txq));
   5573 
   5574 	if (sc->sc_type < WM_T_82543) {
   5575 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5576 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5577 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
   5578 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5579 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5580 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5581 	} else {
   5582 		int qid = txq->txq_id;
   5583 
   5584 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5585 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5586 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
   5587 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5588 
   5589 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5590 			/*
   5591 			 * Don't write TDT before TCTL.EN is set.
			 * See the datasheet.
   5593 			 */
   5594 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5595 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5596 			    | TXDCTL_WTHRESH(0));
   5597 		else {
			/*
			 * TIDV counts in coarser units than ITR
			 * (roughly 4x), hence the division.
			 */
			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
			if (sc->sc_type >= WM_T_82540) {
				/* TADV should be set to the same value */
				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5603 			}
   5604 
   5605 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5606 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5607 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5608 		}
   5609 	}
   5610 }
   5611 
   5612 static void
   5613 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5614 {
   5615 	int i;
   5616 
   5617 	KASSERT(WM_TX_LOCKED(txq));
   5618 
   5619 	/* Initialize the transmit job descriptors. */
   5620 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5621 		txq->txq_soft[i].txs_mbuf = NULL;
   5622 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5623 	txq->txq_snext = 0;
   5624 	txq->txq_sdirty = 0;
   5625 }
   5626 
   5627 static void
   5628 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
   5629 {
   5630 
   5631 	KASSERT(WM_TX_LOCKED(txq));
   5632 
   5633 	/*
   5634 	 * Set up some register offsets that are different between
   5635 	 * the i82542 and the i82543 and later chips.
   5636 	 */
   5637 	if (sc->sc_type < WM_T_82543) {
   5638 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5639 	} else {
   5640 		txq->txq_tdt_reg = WMREG_TDT(0);
   5641 	}
   5642 
   5643 	wm_init_tx_descs(sc, txq);
   5644 	wm_init_tx_regs(sc, txq);
   5645 	wm_init_tx_buffer(sc, txq);
   5646 }
   5647 
   5648 static void
   5649 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5650 {
   5651 
   5652 	KASSERT(WM_RX_LOCKED(rxq));
   5653 
   5654 	/*
   5655 	 * Initialize the receive descriptor and receive job
   5656 	 * descriptor rings.
   5657 	 */
   5658 	if (sc->sc_type < WM_T_82543) {
   5659 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5660 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5661 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5662 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5663 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5664 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5665 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5666 
   5667 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5668 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5669 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5670 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5671 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5672 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5673 	} else {
   5674 		int qid = rxq->rxq_id;
   5675 
   5676 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5677 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5678 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5679 
   5680 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5681 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or higher\n", __func__, MCLBYTES);
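			/*
			 * SRRCTL.BSIZEPKT is in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
			 * multiple-of check above.
			 */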
   5683 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5684 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5685 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5686 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5687 			    | RXDCTL_WTHRESH(1));
   5688 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5689 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5690 		} else {
   5691 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5692 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
			/* RDTR counts in coarser units than ITR; scale down */
			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
			/* RADV MUST be set to the same value as RDTR */
			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5697 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5698 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5699 		}
   5700 	}
   5701 }
   5702 
   5703 static int
   5704 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5705 {
   5706 	struct wm_rxsoft *rxs;
   5707 	int error, i;
   5708 
   5709 	KASSERT(WM_RX_LOCKED(rxq));
   5710 
   5711 	for (i = 0; i < WM_NRXDESC; i++) {
   5712 		rxs = &rxq->rxq_soft[i];
   5713 		if (rxs->rxs_mbuf == NULL) {
   5714 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5715 				log(LOG_ERR, "%s: unable to allocate or map "
   5716 				    "rx buffer %d, error = %d\n",
   5717 				    device_xname(sc->sc_dev), i, error);
   5718 				/*
   5719 				 * XXX Should attempt to run with fewer receive
   5720 				 * XXX buffers instead of just failing.
   5721 				 */
   5722 				wm_rxdrain(rxq);
   5723 				return ENOMEM;
   5724 			}
   5725 		} else {
   5726 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5727 				wm_init_rxdesc(rxq, i);
			/*
			 * For 82575 and newer devices, the RX descriptors
			 * must be initialized after RCTL.EN is set in
			 * wm_set_filter().
			 */
   5733 		}
   5734 	}
   5735 	rxq->rxq_ptr = 0;
   5736 	rxq->rxq_discard = 0;
   5737 	WM_RXCHAIN_RESET(rxq);
   5738 
   5739 	return 0;
   5740 }
   5741 
   5742 static int
   5743 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5744 {
   5745 
   5746 	KASSERT(WM_RX_LOCKED(rxq));
   5747 
   5748 	/*
   5749 	 * Set up some register offsets that are different between
   5750 	 * the i82542 and the i82543 and later chips.
   5751 	 */
   5752 	if (sc->sc_type < WM_T_82543) {
   5753 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5754 	} else {
   5755 		rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
   5756 	}
   5757 
   5758 	wm_init_rx_regs(sc, rxq);
   5759 	return wm_init_rx_buffer(sc, rxq);
   5760 }
   5761 
   5762 /*
 * wm_init_txrx_queues:
   5764  *	Initialize {tx,rx}descs and {tx,rx} buffers
   5765  */
   5766 static int
   5767 wm_init_txrx_queues(struct wm_softc *sc)
   5768 {
   5769 	int i, error;
   5770 
   5771 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5772 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5773 		WM_TX_LOCK(txq);
   5774 		wm_init_tx_queue(sc, txq);
   5775 		WM_TX_UNLOCK(txq);
   5776 	}
   5777 
   5778 	error = 0;
   5779 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5780 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5781 		WM_RX_LOCK(rxq);
   5782 		error = wm_init_rx_queue(sc, rxq);
   5783 		WM_RX_UNLOCK(rxq);
   5784 		if (error)
   5785 			break;
   5786 	}
   5787 
   5788 	return error;
   5789 }
   5790 
   5791 /*
   5792  * wm_tx_offload:
   5793  *
   5794  *	Set up TCP/IP checksumming parameters for the
   5795  *	specified packet.
   5796  */
   5797 static int
   5798 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5799     uint8_t *fieldsp)
   5800 {
   5801 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5802 	struct mbuf *m0 = txs->txs_mbuf;
   5803 	struct livengood_tcpip_ctxdesc *t;
   5804 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5805 	uint32_t ipcse;
   5806 	struct ether_header *eh;
   5807 	int offset, iphl;
   5808 	uint8_t fields;
   5809 
   5810 	/*
   5811 	 * XXX It would be nice if the mbuf pkthdr had offset
   5812 	 * fields for the protocol headers.
   5813 	 */
   5814 
   5815 	eh = mtod(m0, struct ether_header *);
   5816 	switch (htons(eh->ether_type)) {
   5817 	case ETHERTYPE_IP:
   5818 	case ETHERTYPE_IPV6:
   5819 		offset = ETHER_HDR_LEN;
   5820 		break;
   5821 
   5822 	case ETHERTYPE_VLAN:
   5823 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5824 		break;
   5825 
   5826 	default:
   5827 		/*
   5828 		 * Don't support this protocol or encapsulation.
   5829 		 */
   5830 		*fieldsp = 0;
   5831 		*cmdp = 0;
   5832 		return 0;
   5833 	}
   5834 
   5835 	if ((m0->m_pkthdr.csum_flags &
   5836 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   5837 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5838 	} else {
   5839 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5840 	}
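	/* The IP checksum region ends at the last byte of the IP header. */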
   5841 	ipcse = offset + iphl - 1;
   5842 
   5843 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   5844 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   5845 	seg = 0;
   5846 	fields = 0;
   5847 
   5848 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5849 		int hlen = offset + iphl;
   5850 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5851 
   5852 		if (__predict_false(m0->m_len <
   5853 				    (hlen + sizeof(struct tcphdr)))) {
   5854 			/*
   5855 			 * TCP/IP headers are not in the first mbuf; we need
   5856 			 * to do this the slow and painful way.  Let's just
   5857 			 * hope this doesn't happen very often.
   5858 			 */
   5859 			struct tcphdr th;
   5860 
   5861 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   5862 
   5863 			m_copydata(m0, hlen, sizeof(th), &th);
   5864 			if (v4) {
   5865 				struct ip ip;
   5866 
   5867 				m_copydata(m0, offset, sizeof(ip), &ip);
   5868 				ip.ip_len = 0;
   5869 				m_copyback(m0,
   5870 				    offset + offsetof(struct ip, ip_len),
   5871 				    sizeof(ip.ip_len), &ip.ip_len);
   5872 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5873 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5874 			} else {
   5875 				struct ip6_hdr ip6;
   5876 
   5877 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   5878 				ip6.ip6_plen = 0;
   5879 				m_copyback(m0,
   5880 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   5881 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   5882 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   5883 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   5884 			}
   5885 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   5886 			    sizeof(th.th_sum), &th.th_sum);
   5887 
   5888 			hlen += th.th_off << 2;
   5889 		} else {
   5890 			/*
   5891 			 * TCP/IP headers are in the first mbuf; we can do
   5892 			 * this the easy way.
   5893 			 */
   5894 			struct tcphdr *th;
   5895 
   5896 			if (v4) {
   5897 				struct ip *ip =
   5898 				    (void *)(mtod(m0, char *) + offset);
   5899 				th = (void *)(mtod(m0, char *) + hlen);
   5900 
   5901 				ip->ip_len = 0;
   5902 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   5903 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   5904 			} else {
   5905 				struct ip6_hdr *ip6 =
   5906 				    (void *)(mtod(m0, char *) + offset);
   5907 				th = (void *)(mtod(m0, char *) + hlen);
   5908 
   5909 				ip6->ip6_plen = 0;
   5910 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   5911 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   5912 			}
   5913 			hlen += th->th_off << 2;
   5914 		}
   5915 
   5916 		if (v4) {
   5917 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   5918 			cmdlen |= WTX_TCPIP_CMD_IP;
   5919 		} else {
   5920 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
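			/* IPv6 has no header checksum, so clear IPCSE. */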
   5921 			ipcse = 0;
   5922 		}
   5923 		cmd |= WTX_TCPIP_CMD_TSE;
   5924 		cmdlen |= WTX_TCPIP_CMD_TSE |
   5925 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   5926 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   5927 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   5928 	}
   5929 
   5930 	/*
   5931 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   5932 	 * offload feature, if we load the context descriptor, we
   5933 	 * MUST provide valid values for IPCSS and TUCSS fields.
   5934 	 */
   5935 
   5936 	ipcs = WTX_TCPIP_IPCSS(offset) |
   5937 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   5938 	    WTX_TCPIP_IPCSE(ipcse);
   5939 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   5940 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   5941 		fields |= WTX_IXSM;
   5942 	}
   5943 
   5944 	offset += iphl;
   5945 
   5946 	if (m0->m_pkthdr.csum_flags &
   5947 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   5948 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   5949 		fields |= WTX_TXSM;
   5950 		tucs = WTX_TCPIP_TUCSS(offset) |
   5951 		    WTX_TCPIP_TUCSO(offset +
   5952 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   5953 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5954 	} else if ((m0->m_pkthdr.csum_flags &
   5955 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   5956 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   5957 		fields |= WTX_TXSM;
   5958 		tucs = WTX_TCPIP_TUCSS(offset) |
   5959 		    WTX_TCPIP_TUCSO(offset +
   5960 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   5961 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5962 	} else {
   5963 		/* Just initialize it to a valid TCP context. */
   5964 		tucs = WTX_TCPIP_TUCSS(offset) |
   5965 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   5966 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5967 	}
   5968 
   5969 	/* Fill in the context descriptor. */
   5970 	t = (struct livengood_tcpip_ctxdesc *)
   5971 	    &txq->txq_descs[txq->txq_next];
   5972 	t->tcpip_ipcs = htole32(ipcs);
   5973 	t->tcpip_tucs = htole32(tucs);
   5974 	t->tcpip_cmdlen = htole32(cmdlen);
   5975 	t->tcpip_seg = htole32(seg);
   5976 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   5977 
   5978 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5979 	txs->txs_ndesc++;
   5980 
   5981 	*cmdp = cmd;
   5982 	*fieldsp = fields;
   5983 
   5984 	return 0;
   5985 }
   5986 
   5987 /*
   5988  * wm_start:		[ifnet interface function]
   5989  *
   5990  *	Start packet transmission on the interface.
   5991  */
   5992 static void
   5993 wm_start(struct ifnet *ifp)
   5994 {
   5995 	struct wm_softc *sc = ifp->if_softc;
   5996 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5997 
   5998 	WM_TX_LOCK(txq);
   5999 	if (!sc->sc_stopping)
   6000 		wm_start_locked(ifp);
   6001 	WM_TX_UNLOCK(txq);
   6002 }
   6003 
   6004 static void
   6005 wm_start_locked(struct ifnet *ifp)
   6006 {
   6007 	struct wm_softc *sc = ifp->if_softc;
   6008 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6009 	struct mbuf *m0;
   6010 	struct m_tag *mtag;
   6011 	struct wm_txsoft *txs;
   6012 	bus_dmamap_t dmamap;
   6013 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6014 	bus_addr_t curaddr;
   6015 	bus_size_t seglen, curlen;
   6016 	uint32_t cksumcmd;
   6017 	uint8_t cksumfields;
   6018 
   6019 	KASSERT(WM_TX_LOCKED(txq));
   6020 
   6021 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   6022 		return;
   6023 
   6024 	/* Remember the previous number of free descriptors. */
   6025 	ofree = txq->txq_free;
   6026 
   6027 	/*
   6028 	 * Loop through the send queue, setting up transmit descriptors
   6029 	 * until we drain the queue, or use up all available transmit
   6030 	 * descriptors.
   6031 	 */
   6032 	for (;;) {
   6033 		m0 = NULL;
   6034 
   6035 		/* Get a work queue entry. */
   6036 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6037 			wm_txeof(sc);
   6038 			if (txq->txq_sfree == 0) {
   6039 				DPRINTF(WM_DEBUG_TX,
   6040 				    ("%s: TX: no free job descriptors\n",
   6041 					device_xname(sc->sc_dev)));
   6042 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6043 				break;
   6044 			}
   6045 		}
   6046 
   6047 		/* Grab a packet off the queue. */
   6048 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6049 		if (m0 == NULL)
   6050 			break;
   6051 
   6052 		DPRINTF(WM_DEBUG_TX,
   6053 		    ("%s: TX: have packet to transmit: %p\n",
   6054 		    device_xname(sc->sc_dev), m0));
   6055 
   6056 		txs = &txq->txq_soft[txq->txq_snext];
   6057 		dmamap = txs->txs_dmamap;
   6058 
   6059 		use_tso = (m0->m_pkthdr.csum_flags &
   6060 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6061 
   6062 		/*
   6063 		 * So says the Linux driver:
   6064 		 * The controller does a simple calculation to make sure
   6065 		 * there is enough room in the FIFO before initiating the
   6066 		 * DMA for each buffer.  The calc is:
   6067 		 *	4 = ceil(buffer len / MSS)
   6068 		 * To make sure we don't overrun the FIFO, adjust the max
   6069 		 * buffer len if the MSS drops.
   6070 		 */
   6071 		dmamap->dm_maxsegsz =
   6072 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6073 		    ? m0->m_pkthdr.segsz << 2
   6074 		    : WTX_MAX_LEN;
   6075 
   6076 		/*
   6077 		 * Load the DMA map.  If this fails, the packet either
   6078 		 * didn't fit in the allotted number of segments, or we
   6079 		 * were short on resources.  For the too-many-segments
   6080 		 * case, we simply report an error and drop the packet,
   6081 		 * since we can't sanely copy a jumbo packet to a single
   6082 		 * buffer.
   6083 		 */
   6084 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6085 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   6086 		if (error) {
   6087 			if (error == EFBIG) {
   6088 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6089 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6090 				    "DMA segments, dropping...\n",
   6091 				    device_xname(sc->sc_dev));
   6092 				wm_dump_mbuf_chain(sc, m0);
   6093 				m_freem(m0);
   6094 				continue;
   6095 			}
   6096 			/*  Short on resources, just stop for now. */
   6097 			DPRINTF(WM_DEBUG_TX,
   6098 			    ("%s: TX: dmamap load failed: %d\n",
   6099 			    device_xname(sc->sc_dev), error));
   6100 			break;
   6101 		}
   6102 
   6103 		segs_needed = dmamap->dm_nsegs;
   6104 		if (use_tso) {
   6105 			/* For sentinel descriptor; see below. */
   6106 			segs_needed++;
   6107 		}
   6108 
   6109 		/*
   6110 		 * Ensure we have enough descriptors free to describe
   6111 		 * the packet.  Note, we always reserve one descriptor
   6112 		 * at the end of the ring due to the semantics of the
   6113 		 * TDT register, plus one more in the event we need
   6114 		 * to load offload context.
   6115 		 */
   6116 		if (segs_needed > txq->txq_free - 2) {
   6117 			/*
   6118 			 * Not enough free descriptors to transmit this
   6119 			 * packet.  We haven't committed anything yet,
   6120 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   6122 			 * layer that there are no more slots left.
   6123 			 */
   6124 			DPRINTF(WM_DEBUG_TX,
   6125 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6126 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6127 			    segs_needed, txq->txq_free - 1));
   6128 			ifp->if_flags |= IFF_OACTIVE;
   6129 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6130 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6131 			break;
   6132 		}
   6133 
   6134 		/*
   6135 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6136 		 * once we know we can transmit the packet, since we
   6137 		 * do some internal FIFO space accounting here.
   6138 		 */
   6139 		if (sc->sc_type == WM_T_82547 &&
   6140 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6141 			DPRINTF(WM_DEBUG_TX,
   6142 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6143 			    device_xname(sc->sc_dev)));
   6144 			ifp->if_flags |= IFF_OACTIVE;
   6145 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6146 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   6147 			break;
   6148 		}
   6149 
   6150 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6151 
   6152 		DPRINTF(WM_DEBUG_TX,
   6153 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6154 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6155 
   6156 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6157 
   6158 		/*
   6159 		 * Store a pointer to the packet so that we can free it
   6160 		 * later.
   6161 		 *
   6162 		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may be
   6164 		 * incremented by 1 if we do checksum offload (a descriptor
   6165 		 * is used to set the checksum context).
   6166 		 */
   6167 		txs->txs_mbuf = m0;
   6168 		txs->txs_firstdesc = txq->txq_next;
   6169 		txs->txs_ndesc = segs_needed;
   6170 
   6171 		/* Set up offload parameters for this packet. */
   6172 		if (m0->m_pkthdr.csum_flags &
   6173 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   6174 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   6175 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   6176 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6177 					  &cksumfields) != 0) {
   6178 				/* Error message already displayed. */
   6179 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6180 				continue;
   6181 			}
   6182 		} else {
   6183 			cksumcmd = 0;
   6184 			cksumfields = 0;
   6185 		}
   6186 
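		/* Delay interrupts and insert the Ethernet FCS. */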
   6187 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6188 
   6189 		/* Sync the DMA map. */
   6190 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6191 		    BUS_DMASYNC_PREWRITE);
   6192 
   6193 		/* Initialize the transmit descriptor. */
   6194 		for (nexttx = txq->txq_next, seg = 0;
   6195 		     seg < dmamap->dm_nsegs; seg++) {
   6196 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6197 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6198 			     seglen != 0;
   6199 			     curaddr += curlen, seglen -= curlen,
   6200 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6201 				curlen = seglen;
   6202 
   6203 				/*
   6204 				 * So says the Linux driver:
   6205 				 * Work around for premature descriptor
   6206 				 * write-backs in TSO mode.  Append a
   6207 				 * 4-byte sentinel descriptor.
   6208 				 */
   6209 				if (use_tso &&
   6210 				    seg == dmamap->dm_nsegs - 1 &&
   6211 				    curlen > 8)
   6212 					curlen -= 4;
   6213 
   6214 				wm_set_dma_addr(
   6215 				    &txq->txq_descs[nexttx].wtx_addr,
   6216 				    curaddr);
   6217 				txq->txq_descs[nexttx].wtx_cmdlen =
   6218 				    htole32(cksumcmd | curlen);
   6219 				txq->txq_descs[nexttx].wtx_fields.wtxu_status =
   6220 				    0;
   6221 				txq->txq_descs[nexttx].wtx_fields.wtxu_options =
   6222 				    cksumfields;
   6223 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6224 				lasttx = nexttx;
   6225 
   6226 				DPRINTF(WM_DEBUG_TX,
   6227 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6228 				     "len %#04zx\n",
   6229 				    device_xname(sc->sc_dev), nexttx,
   6230 				    (uint64_t)curaddr, curlen));
   6231 			}
   6232 		}
   6233 
   6234 		KASSERT(lasttx != -1);
   6235 
   6236 		/*
   6237 		 * Set up the command byte on the last descriptor of
   6238 		 * the packet.  If we're in the interrupt delay window,
   6239 		 * delay the interrupt.
   6240 		 */
   6241 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6242 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6243 
   6244 		/*
   6245 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6246 		 * up the descriptor to encapsulate the packet for us.
   6247 		 *
   6248 		 * This is only valid on the last descriptor of the packet.
   6249 		 */
   6250 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6251 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6252 			    htole32(WTX_CMD_VLE);
   6253 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6254 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6255 		}
   6256 
   6257 		txs->txs_lastdesc = lasttx;
   6258 
   6259 		DPRINTF(WM_DEBUG_TX,
   6260 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6261 		    device_xname(sc->sc_dev),
   6262 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6263 
   6264 		/* Sync the descriptors we're using. */
   6265 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6266 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   6267 
   6268 		/* Give the packet to the chip. */
   6269 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6270 
   6271 		DPRINTF(WM_DEBUG_TX,
   6272 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6273 
   6274 		DPRINTF(WM_DEBUG_TX,
   6275 		    ("%s: TX: finished transmitting packet, job %d\n",
   6276 		    device_xname(sc->sc_dev), txq->txq_snext));
   6277 
   6278 		/* Advance the tx pointer. */
   6279 		txq->txq_free -= txs->txs_ndesc;
   6280 		txq->txq_next = nexttx;
   6281 
   6282 		txq->txq_sfree--;
   6283 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6284 
   6285 		/* Pass the packet to any BPF listeners. */
   6286 		bpf_mtap(ifp, m0);
   6287 	}
   6288 
   6289 	if (m0 != NULL) {
   6290 		ifp->if_flags |= IFF_OACTIVE;
   6291 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6292 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   6293 		m_freem(m0);
   6294 	}
   6295 
   6296 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6297 		/* No more slots; notify upper layer. */
   6298 		ifp->if_flags |= IFF_OACTIVE;
   6299 	}
   6300 
   6301 	if (txq->txq_free != ofree) {
   6302 		/* Set a watchdog timer in case the chip flakes out. */
   6303 		ifp->if_timer = 5;
   6304 	}
   6305 }
   6306 
   6307 /*
   6308  * wm_nq_tx_offload:
   6309  *
   6310  *	Set up TCP/IP checksumming parameters for the
   6311  *	specified packet, for NEWQUEUE devices
   6312  */
   6313 static int
   6314 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   6315     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6316 {
   6317 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6318 	struct mbuf *m0 = txs->txs_mbuf;
   6319 	struct m_tag *mtag;
   6320 	uint32_t vl_len, mssidx, cmdc;
   6321 	struct ether_header *eh;
   6322 	int offset, iphl;
   6323 
   6324 	/*
   6325 	 * XXX It would be nice if the mbuf pkthdr had offset
   6326 	 * fields for the protocol headers.
   6327 	 */
   6328 	*cmdlenp = 0;
   6329 	*fieldsp = 0;
   6330 
   6331 	eh = mtod(m0, struct ether_header *);
   6332 	switch (htons(eh->ether_type)) {
   6333 	case ETHERTYPE_IP:
   6334 	case ETHERTYPE_IPV6:
   6335 		offset = ETHER_HDR_LEN;
   6336 		break;
   6337 
   6338 	case ETHERTYPE_VLAN:
   6339 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6340 		break;
   6341 
   6342 	default:
   6343 		/* Don't support this protocol or encapsulation. */
   6344 		*do_csum = false;
   6345 		return 0;
   6346 	}
   6347 	*do_csum = true;
   6348 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6349 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6350 
   6351 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6352 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6353 
   6354 	if ((m0->m_pkthdr.csum_flags &
   6355 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   6356 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6357 	} else {
   6358 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6359 	}
   6360 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6361 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6362 
   6363 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6364 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6365 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6366 		*cmdlenp |= NQTX_CMD_VLE;
   6367 	}
   6368 
   6369 	mssidx = 0;
   6370 
   6371 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6372 		int hlen = offset + iphl;
   6373 		int tcp_hlen;
   6374 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6375 
   6376 		if (__predict_false(m0->m_len <
   6377 				    (hlen + sizeof(struct tcphdr)))) {
   6378 			/*
   6379 			 * TCP/IP headers are not in the first mbuf; we need
   6380 			 * to do this the slow and painful way.  Let's just
   6381 			 * hope this doesn't happen very often.
   6382 			 */
   6383 			struct tcphdr th;
   6384 
   6385 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6386 
   6387 			m_copydata(m0, hlen, sizeof(th), &th);
   6388 			if (v4) {
   6389 				struct ip ip;
   6390 
   6391 				m_copydata(m0, offset, sizeof(ip), &ip);
   6392 				ip.ip_len = 0;
   6393 				m_copyback(m0,
   6394 				    offset + offsetof(struct ip, ip_len),
   6395 				    sizeof(ip.ip_len), &ip.ip_len);
   6396 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6397 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6398 			} else {
   6399 				struct ip6_hdr ip6;
   6400 
   6401 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6402 				ip6.ip6_plen = 0;
   6403 				m_copyback(m0,
   6404 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6405 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6406 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6407 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6408 			}
   6409 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6410 			    sizeof(th.th_sum), &th.th_sum);
   6411 
   6412 			tcp_hlen = th.th_off << 2;
   6413 		} else {
   6414 			/*
   6415 			 * TCP/IP headers are in the first mbuf; we can do
   6416 			 * this the easy way.
   6417 			 */
   6418 			struct tcphdr *th;
   6419 
   6420 			if (v4) {
   6421 				struct ip *ip =
   6422 				    (void *)(mtod(m0, char *) + offset);
   6423 				th = (void *)(mtod(m0, char *) + hlen);
   6424 
   6425 				ip->ip_len = 0;
   6426 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6427 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6428 			} else {
   6429 				struct ip6_hdr *ip6 =
   6430 				    (void *)(mtod(m0, char *) + offset);
   6431 				th = (void *)(mtod(m0, char *) + hlen);
   6432 
   6433 				ip6->ip6_plen = 0;
   6434 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6435 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6436 			}
   6437 			tcp_hlen = th->th_off << 2;
   6438 		}
   6439 		hlen += tcp_hlen;
   6440 		*cmdlenp |= NQTX_CMD_TSE;
   6441 
   6442 		if (v4) {
   6443 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6444 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6445 		} else {
   6446 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6447 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6448 		}
   6449 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6450 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6451 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6452 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6453 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6454 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6455 	} else {
   6456 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6457 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6458 	}
   6459 
   6460 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6461 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6462 		cmdc |= NQTXC_CMD_IP4;
   6463 	}
   6464 
   6465 	if (m0->m_pkthdr.csum_flags &
   6466 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6467 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6468 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6469 			cmdc |= NQTXC_CMD_TCP;
   6470 		} else {
   6471 			cmdc |= NQTXC_CMD_UDP;
   6472 		}
   6473 		cmdc |= NQTXC_CMD_IP4;
   6474 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6475 	}
   6476 	if (m0->m_pkthdr.csum_flags &
   6477 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6478 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6479 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6480 			cmdc |= NQTXC_CMD_TCP;
   6481 		} else {
   6482 			cmdc |= NQTXC_CMD_UDP;
   6483 		}
   6484 		cmdc |= NQTXC_CMD_IP6;
   6485 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6486 	}
   6487 
   6488 	/* Fill in the context descriptor. */
   6489 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6490 	    htole32(vl_len);
   6491 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6492 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6493 	    htole32(cmdc);
   6494 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6495 	    htole32(mssidx);
   6496 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6497 	DPRINTF(WM_DEBUG_TX,
   6498 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6499 	    txq->txq_next, 0, vl_len));
   6500 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6501 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6502 	txs->txs_ndesc++;
   6503 	return 0;
   6504 }
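
/*
 * Illustrative sketch (hypothetical helpers, not used by the driver):
 * the two context descriptor words built above are plain shift-and-OR
 * packings of the MAC header length, IP header length, VLAN tag, MSS
 * and TCP header length, using the NQTXC_* shifts.
 */
#if 0
static inline uint32_t
example_pack_vl_len(int maclen, int iphl, uint16_t vlan)
{

	return (maclen << NQTXC_VLLEN_MACLEN_SHIFT)
	    | (iphl << NQTXC_VLLEN_IPLEN_SHIFT)
	    | ((uint32_t)vlan << NQTXC_VLLEN_VLAN_SHIFT);
}

static inline uint32_t
example_pack_mssidx(int mss, int tcp_hlen)
{

	return (mss << NQTXC_MSSIDX_MSS_SHIFT)
	    | (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
}
#endif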
   6505 
   6506 /*
   6507  * wm_nq_start:		[ifnet interface function]
   6508  *
   6509  *	Start packet transmission on the interface for NEWQUEUE devices
   6510  */
   6511 static void
   6512 wm_nq_start(struct ifnet *ifp)
   6513 {
   6514 	struct wm_softc *sc = ifp->if_softc;
   6515 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6516 
   6517 	WM_TX_LOCK(txq);
   6518 	if (!sc->sc_stopping)
   6519 		wm_nq_start_locked(ifp);
   6520 	WM_TX_UNLOCK(txq);
   6521 }
   6522 
   6523 static void
   6524 wm_nq_start_locked(struct ifnet *ifp)
   6525 {
   6526 	struct wm_softc *sc = ifp->if_softc;
   6527 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6528 	struct mbuf *m0;
   6529 	struct m_tag *mtag;
   6530 	struct wm_txsoft *txs;
   6531 	bus_dmamap_t dmamap;
   6532 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6533 	bool do_csum, sent;
   6534 
   6535 	KASSERT(WM_TX_LOCKED(txq));
   6536 
   6537 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   6538 		return;
   6539 
   6540 	sent = false;
   6541 
   6542 	/*
   6543 	 * Loop through the send queue, setting up transmit descriptors
   6544 	 * until we drain the queue, or use up all available transmit
   6545 	 * descriptors.
   6546 	 */
   6547 	for (;;) {
   6548 		m0 = NULL;
   6549 
   6550 		/* Get a work queue entry. */
   6551 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6552 			wm_txeof(sc);
   6553 			if (txq->txq_sfree == 0) {
   6554 				DPRINTF(WM_DEBUG_TX,
   6555 				    ("%s: TX: no free job descriptors\n",
   6556 					device_xname(sc->sc_dev)));
   6557 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6558 				break;
   6559 			}
   6560 		}
   6561 
   6562 		/* Grab a packet off the queue. */
   6563 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6564 		if (m0 == NULL)
   6565 			break;
   6566 
   6567 		DPRINTF(WM_DEBUG_TX,
   6568 		    ("%s: TX: have packet to transmit: %p\n",
   6569 		    device_xname(sc->sc_dev), m0));
   6570 
   6571 		txs = &txq->txq_soft[txq->txq_snext];
   6572 		dmamap = txs->txs_dmamap;
   6573 
   6574 		/*
   6575 		 * Load the DMA map.  If this fails, the packet either
   6576 		 * didn't fit in the allotted number of segments, or we
   6577 		 * were short on resources.  For the too-many-segments
   6578 		 * case, we simply report an error and drop the packet,
   6579 		 * since we can't sanely copy a jumbo packet to a single
   6580 		 * buffer.
   6581 		 */
   6582 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6583 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   6584 		if (error) {
   6585 			if (error == EFBIG) {
   6586 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6587 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6588 				    "DMA segments, dropping...\n",
   6589 				    device_xname(sc->sc_dev));
   6590 				wm_dump_mbuf_chain(sc, m0);
   6591 				m_freem(m0);
   6592 				continue;
   6593 			}
   6594 			/* Short on resources, just stop for now. */
   6595 			DPRINTF(WM_DEBUG_TX,
   6596 			    ("%s: TX: dmamap load failed: %d\n",
   6597 			    device_xname(sc->sc_dev), error));
   6598 			break;
   6599 		}
   6600 
   6601 		segs_needed = dmamap->dm_nsegs;
   6602 
   6603 		/*
   6604 		 * Ensure we have enough descriptors free to describe
   6605 		 * the packet.  Note, we always reserve one descriptor
   6606 		 * at the end of the ring due to the semantics of the
   6607 		 * TDT register, plus one more in the event we need
   6608 		 * to load offload context.
   6609 		 */
   6610 		if (segs_needed > txq->txq_free - 2) {
   6611 			/*
   6612 			 * Not enough free descriptors to transmit this
   6613 			 * packet.  We haven't committed anything yet,
    6614 			 * so just unload the DMA map and punt; the packet
    6615 			 * is dropped below.  Notify the upper layer that
    6616 			 * there are no more slots left.
   6617 			 */
   6618 			DPRINTF(WM_DEBUG_TX,
   6619 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6620 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6621 			    segs_needed, txq->txq_free - 1));
   6622 			ifp->if_flags |= IFF_OACTIVE;
   6623 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6624 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6625 			break;
   6626 		}
   6627 
   6628 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6629 
   6630 		DPRINTF(WM_DEBUG_TX,
   6631 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6632 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6633 
   6634 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6635 
   6636 		/*
   6637 		 * Store a pointer to the packet so that we can free it
   6638 		 * later.
   6639 		 *
   6640 		 * Initially, we consider the number of descriptors the
   6641 		 * packet uses the number of DMA segments.  This may be
   6642 		 * incremented by 1 if we do checksum offload (a descriptor
   6643 		 * is used to set the checksum context).
   6644 		 */
   6645 		txs->txs_mbuf = m0;
   6646 		txs->txs_firstdesc = txq->txq_next;
   6647 		txs->txs_ndesc = segs_needed;
   6648 
   6649 		/* Set up offload parameters for this packet. */
   6650 		uint32_t cmdlen, fields, dcmdlen;
   6651 		if (m0->m_pkthdr.csum_flags &
   6652 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   6653 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   6654 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   6655 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   6656 			    &do_csum) != 0) {
   6657 				/* Error message already displayed. */
   6658 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6659 				continue;
   6660 			}
   6661 		} else {
   6662 			do_csum = false;
   6663 			cmdlen = 0;
   6664 			fields = 0;
   6665 		}
   6666 
   6667 		/* Sync the DMA map. */
   6668 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6669 		    BUS_DMASYNC_PREWRITE);
   6670 
   6671 		/* Initialize the first transmit descriptor. */
   6672 		nexttx = txq->txq_next;
   6673 		if (!do_csum) {
    6674 			/* Set up a legacy descriptor. */
   6675 			wm_set_dma_addr(
   6676 			    &txq->txq_descs[nexttx].wtx_addr,
   6677 			    dmamap->dm_segs[0].ds_addr);
   6678 			txq->txq_descs[nexttx].wtx_cmdlen =
   6679 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6680 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6681 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6682 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6683 			    NULL) {
   6684 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6685 				    htole32(WTX_CMD_VLE);
   6686 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6687 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6688 			} else {
    6689 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6690 			}
   6691 			dcmdlen = 0;
   6692 		} else {
    6693 			/* Set up an advanced data descriptor. */
   6694 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6695 			    htole64(dmamap->dm_segs[0].ds_addr);
   6696 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6697 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6698 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6699 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6700 			    htole32(fields);
   6701 			DPRINTF(WM_DEBUG_TX,
   6702 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6703 			    device_xname(sc->sc_dev), nexttx,
   6704 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6705 			DPRINTF(WM_DEBUG_TX,
   6706 			    ("\t 0x%08x%08x\n", fields,
   6707 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6708 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6709 		}
   6710 
   6711 		lasttx = nexttx;
   6712 		nexttx = WM_NEXTTX(txq, nexttx);
   6713 		/*
    6714 		 * Fill in the remaining descriptors.  The legacy and
    6715 		 * advanced formats are the same from here on.
   6716 		 */
   6717 		for (seg = 1; seg < dmamap->dm_nsegs;
   6718 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6719 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6720 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6721 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6722 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6723 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6724 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6725 			lasttx = nexttx;
   6726 
   6727 			DPRINTF(WM_DEBUG_TX,
   6728 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6729 			     "len %#04zx\n",
   6730 			    device_xname(sc->sc_dev), nexttx,
   6731 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6732 			    dmamap->dm_segs[seg].ds_len));
   6733 		}
   6734 
   6735 		KASSERT(lasttx != -1);
   6736 
   6737 		/*
   6738 		 * Set up the command byte on the last descriptor of
   6739 		 * the packet.  If we're in the interrupt delay window,
   6740 		 * delay the interrupt.
   6741 		 */
   6742 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6743 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6744 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6745 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6746 
   6747 		txs->txs_lastdesc = lasttx;
   6748 
   6749 		DPRINTF(WM_DEBUG_TX,
   6750 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6751 		    device_xname(sc->sc_dev),
   6752 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6753 
   6754 		/* Sync the descriptors we're using. */
   6755 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6756 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   6757 
   6758 		/* Give the packet to the chip. */
   6759 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6760 		sent = true;
   6761 
   6762 		DPRINTF(WM_DEBUG_TX,
   6763 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6764 
   6765 		DPRINTF(WM_DEBUG_TX,
   6766 		    ("%s: TX: finished transmitting packet, job %d\n",
   6767 		    device_xname(sc->sc_dev), txq->txq_snext));
   6768 
   6769 		/* Advance the tx pointer. */
   6770 		txq->txq_free -= txs->txs_ndesc;
   6771 		txq->txq_next = nexttx;
   6772 
   6773 		txq->txq_sfree--;
   6774 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6775 
   6776 		/* Pass the packet to any BPF listeners. */
   6777 		bpf_mtap(ifp, m0);
   6778 	}
   6779 
   6780 	if (m0 != NULL) {
   6781 		ifp->if_flags |= IFF_OACTIVE;
   6782 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6783 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   6784 		m_freem(m0);
   6785 	}
   6786 
   6787 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6788 		/* No more slots; notify upper layer. */
   6789 		ifp->if_flags |= IFF_OACTIVE;
   6790 	}
   6791 
   6792 	if (sent) {
   6793 		/* Set a watchdog timer in case the chip flakes out. */
   6794 		ifp->if_timer = 5;
   6795 	}
   6796 }
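
/*
 * Illustrative sketch: the "txq_free - 2" test above reserves one
 * descriptor because TDT must never be advanced all the way around
 * to TDH (a full ring would be indistinguishable from an empty one),
 * plus one more for a possible offload context descriptor.  The
 * hypothetical predicate below is the same check in isolation.
 */
#if 0
static inline bool
example_nq_tx_fits(int segs_needed, int txq_free)
{

	/* 1 slot for TDT semantics + 1 slot for a context descriptor. */
	return segs_needed <= txq_free - 2;
}
#endif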
   6797 
   6798 /* Interrupt */
   6799 
   6800 /*
   6801  * wm_txeof:
   6802  *
   6803  *	Helper; handle transmit interrupts.
   6804  */
   6805 static int
   6806 wm_txeof(struct wm_softc *sc)
   6807 {
   6808 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6809 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6810 	struct wm_txsoft *txs;
   6811 	bool processed = false;
   6812 	int count = 0;
   6813 	int i;
   6814 	uint8_t status;
   6815 
   6816 	if (sc->sc_stopping)
   6817 		return 0;
   6818 
   6819 	ifp->if_flags &= ~IFF_OACTIVE;
   6820 
   6821 	/*
   6822 	 * Go through the Tx list and free mbufs for those
   6823 	 * frames which have been transmitted.
   6824 	 */
   6825 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   6826 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   6827 		txs = &txq->txq_soft[i];
   6828 
   6829 		DPRINTF(WM_DEBUG_TX,
   6830 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   6831 
   6832 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   6833 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6834 
   6835 		status =
   6836 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   6837 		if ((status & WTX_ST_DD) == 0) {
   6838 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   6839 			    BUS_DMASYNC_PREREAD);
   6840 			break;
   6841 		}
   6842 
   6843 		processed = true;
   6844 		count++;
   6845 		DPRINTF(WM_DEBUG_TX,
   6846 		    ("%s: TX: job %d done: descs %d..%d\n",
   6847 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   6848 		    txs->txs_lastdesc));
   6849 
   6850 		/*
   6851 		 * XXX We should probably be using the statistics
   6852 		 * XXX registers, but I don't know if they exist
   6853 		 * XXX on chips before the i82544.
   6854 		 */
   6855 
   6856 #ifdef WM_EVENT_COUNTERS
   6857 		if (status & WTX_ST_TU)
   6858 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   6859 #endif /* WM_EVENT_COUNTERS */
   6860 
   6861 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   6862 			ifp->if_oerrors++;
   6863 			if (status & WTX_ST_LC)
   6864 				log(LOG_WARNING, "%s: late collision\n",
   6865 				    device_xname(sc->sc_dev));
   6866 			else if (status & WTX_ST_EC) {
   6867 				ifp->if_collisions += 16;
   6868 				log(LOG_WARNING, "%s: excessive collisions\n",
   6869 				    device_xname(sc->sc_dev));
   6870 			}
   6871 		} else
   6872 			ifp->if_opackets++;
   6873 
   6874 		txq->txq_free += txs->txs_ndesc;
   6875 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   6876 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   6877 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6878 		m_freem(txs->txs_mbuf);
   6879 		txs->txs_mbuf = NULL;
   6880 	}
   6881 
   6882 	/* Update the dirty transmit buffer pointer. */
   6883 	txq->txq_sdirty = i;
   6884 	DPRINTF(WM_DEBUG_TX,
   6885 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   6886 
   6887 	if (count != 0)
   6888 		rnd_add_uint32(&sc->rnd_source, count);
   6889 
   6890 	/*
   6891 	 * If there are no more pending transmissions, cancel the watchdog
   6892 	 * timer.
   6893 	 */
   6894 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   6895 		ifp->if_timer = 0;
   6896 
   6897 	return processed;
   6898 }
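
/*
 * Illustrative sketch: wm_txeof() reaps jobs strictly in order, and
 * a job is complete only once the hardware has set the DD bit in
 * the status byte of its *last* descriptor.  Stripped of the DMA
 * syncs and statistics, the loop has this shape (hypothetical
 * helper names):
 */
#if 0
static void
example_tx_reap(void)
{

	while (example_jobs_outstanding()) {
		struct example_job *job = example_oldest_job();

		if ((job->last_status & WTX_ST_DD) == 0)
			break;	/* still owned by the hardware */
		example_free_job(job);	/* unload DMA map, free mbuf */
	}
}
#endif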
   6899 
   6900 /*
   6901  * wm_rxeof:
   6902  *
   6903  *	Helper; handle receive interrupts.
   6904  */
   6905 static void
   6906 wm_rxeof(struct wm_rxqueue *rxq)
   6907 {
   6908 	struct wm_softc *sc = rxq->rxq_sc;
   6909 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6910 	struct wm_rxsoft *rxs;
   6911 	struct mbuf *m;
   6912 	int i, len;
   6913 	int count = 0;
   6914 	uint8_t status, errors;
   6915 	uint16_t vlantag;
   6916 
   6917 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   6918 		rxs = &rxq->rxq_soft[i];
   6919 
   6920 		DPRINTF(WM_DEBUG_RX,
   6921 		    ("%s: RX: checking descriptor %d\n",
   6922 		    device_xname(sc->sc_dev), i));
   6923 
   6924 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6925 
   6926 		status = rxq->rxq_descs[i].wrx_status;
   6927 		errors = rxq->rxq_descs[i].wrx_errors;
   6928 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   6929 		vlantag = rxq->rxq_descs[i].wrx_special;
   6930 
   6931 		if ((status & WRX_ST_DD) == 0) {
   6932 			/* We have processed all of the receive descriptors. */
   6933 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   6934 			break;
   6935 		}
   6936 
   6937 		count++;
   6938 		if (__predict_false(rxq->rxq_discard)) {
   6939 			DPRINTF(WM_DEBUG_RX,
   6940 			    ("%s: RX: discarding contents of descriptor %d\n",
   6941 			    device_xname(sc->sc_dev), i));
   6942 			wm_init_rxdesc(rxq, i);
   6943 			if (status & WRX_ST_EOP) {
   6944 				/* Reset our state. */
   6945 				DPRINTF(WM_DEBUG_RX,
   6946 				    ("%s: RX: resetting rxdiscard -> 0\n",
   6947 				    device_xname(sc->sc_dev)));
   6948 				rxq->rxq_discard = 0;
   6949 			}
   6950 			continue;
   6951 		}
   6952 
   6953 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6954 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   6955 
   6956 		m = rxs->rxs_mbuf;
   6957 
   6958 		/*
   6959 		 * Add a new receive buffer to the ring, unless of
   6960 		 * course the length is zero. Treat the latter as a
   6961 		 * failed mapping.
   6962 		 */
   6963 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   6964 			/*
   6965 			 * Failed, throw away what we've done so
   6966 			 * far, and discard the rest of the packet.
   6967 			 */
   6968 			ifp->if_ierrors++;
   6969 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6970 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   6971 			wm_init_rxdesc(rxq, i);
   6972 			if ((status & WRX_ST_EOP) == 0)
   6973 				rxq->rxq_discard = 1;
   6974 			if (rxq->rxq_head != NULL)
   6975 				m_freem(rxq->rxq_head);
   6976 			WM_RXCHAIN_RESET(rxq);
   6977 			DPRINTF(WM_DEBUG_RX,
   6978 			    ("%s: RX: Rx buffer allocation failed, "
   6979 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   6980 			    rxq->rxq_discard ? " (discard)" : ""));
   6981 			continue;
   6982 		}
   6983 
   6984 		m->m_len = len;
   6985 		rxq->rxq_len += len;
   6986 		DPRINTF(WM_DEBUG_RX,
   6987 		    ("%s: RX: buffer at %p len %d\n",
   6988 		    device_xname(sc->sc_dev), m->m_data, len));
   6989 
   6990 		/* If this is not the end of the packet, keep looking. */
   6991 		if ((status & WRX_ST_EOP) == 0) {
   6992 			WM_RXCHAIN_LINK(rxq, m);
   6993 			DPRINTF(WM_DEBUG_RX,
   6994 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   6995 			    device_xname(sc->sc_dev), rxq->rxq_len));
   6996 			continue;
   6997 		}
   6998 
   6999 		/*
    7000 		 * Okay, we have the entire packet now.  The chip is
    7001 		 * configured to include the FCS except on I350, I354
    7002 		 * and I21[01] (not all chips can be configured to strip
    7003 		 * it), so we need to trim it.  We may also need to
    7004 		 * adjust the length of the previous mbuf in the chain
    7005 		 * if the current mbuf is too short.
    7006 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7007 		 * register is always set on I350, so we don't trim
    7008 		 * the FCS there.
   7008 		 */
   7009 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7010 		    && (sc->sc_type != WM_T_I210)
   7011 		    && (sc->sc_type != WM_T_I211)) {
   7012 			if (m->m_len < ETHER_CRC_LEN) {
   7013 				rxq->rxq_tail->m_len
   7014 				    -= (ETHER_CRC_LEN - m->m_len);
   7015 				m->m_len = 0;
   7016 			} else
   7017 				m->m_len -= ETHER_CRC_LEN;
   7018 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7019 		} else
   7020 			len = rxq->rxq_len;
   7021 
   7022 		WM_RXCHAIN_LINK(rxq, m);
   7023 
   7024 		*rxq->rxq_tailp = NULL;
   7025 		m = rxq->rxq_head;
   7026 
   7027 		WM_RXCHAIN_RESET(rxq);
   7028 
   7029 		DPRINTF(WM_DEBUG_RX,
   7030 		    ("%s: RX: have entire packet, len -> %d\n",
   7031 		    device_xname(sc->sc_dev), len));
   7032 
   7033 		/* If an error occurred, update stats and drop the packet. */
   7034 		if (errors &
   7035 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7036 			if (errors & WRX_ER_SE)
   7037 				log(LOG_WARNING, "%s: symbol error\n",
   7038 				    device_xname(sc->sc_dev));
   7039 			else if (errors & WRX_ER_SEQ)
   7040 				log(LOG_WARNING, "%s: receive sequence error\n",
   7041 				    device_xname(sc->sc_dev));
   7042 			else if (errors & WRX_ER_CE)
   7043 				log(LOG_WARNING, "%s: CRC error\n",
   7044 				    device_xname(sc->sc_dev));
   7045 			m_freem(m);
   7046 			continue;
   7047 		}
   7048 
   7049 		/* No errors.  Receive the packet. */
   7050 		m->m_pkthdr.rcvif = ifp;
   7051 		m->m_pkthdr.len = len;
   7052 
   7053 		/*
   7054 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7055 		 * for us.  Associate the tag with the packet.
   7056 		 */
   7057 		/* XXXX should check for i350 and i354 */
   7058 		if ((status & WRX_ST_VP) != 0) {
   7059 			VLAN_INPUT_TAG(ifp, m,
   7060 			    le16toh(vlantag),
   7061 			    continue);
   7062 		}
   7063 
   7064 		/* Set up checksum info for this packet. */
   7065 		if ((status & WRX_ST_IXSM) == 0) {
   7066 			if (status & WRX_ST_IPCS) {
   7067 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   7068 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7069 				if (errors & WRX_ER_IPE)
   7070 					m->m_pkthdr.csum_flags |=
   7071 					    M_CSUM_IPv4_BAD;
   7072 			}
   7073 			if (status & WRX_ST_TCPCS) {
   7074 				/*
   7075 				 * Note: we don't know if this was TCP or UDP,
   7076 				 * so we just set both bits, and expect the
   7077 				 * upper layers to deal.
   7078 				 */
   7079 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   7080 				m->m_pkthdr.csum_flags |=
   7081 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7082 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7083 				if (errors & WRX_ER_TCPE)
   7084 					m->m_pkthdr.csum_flags |=
   7085 					    M_CSUM_TCP_UDP_BAD;
   7086 			}
   7087 		}
   7088 
   7089 		ifp->if_ipackets++;
   7090 
   7091 		WM_RX_UNLOCK(rxq);
   7092 
   7093 		/* Pass this up to any BPF listeners. */
   7094 		bpf_mtap(ifp, m);
   7095 
   7096 		/* Pass it on. */
   7097 		(*ifp->if_input)(ifp, m);
   7098 
   7099 		WM_RX_LOCK(rxq);
   7100 
   7101 		if (sc->sc_stopping)
   7102 			break;
   7103 	}
   7104 
   7105 	/* Update the receive pointer. */
   7106 	rxq->rxq_ptr = i;
   7107 	if (count != 0)
   7108 		rnd_add_uint32(&sc->rnd_source, count);
   7109 
   7110 	DPRINTF(WM_DEBUG_RX,
   7111 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7112 }
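
/*
 * Illustrative sketch: trimming the 4-octet FCS from a chained
 * packet, as wm_rxeof() does inline above.  If the final mbuf is
 * shorter than ETHER_CRC_LEN, the remainder must come out of the
 * previous mbuf in the chain (hypothetical helper).
 */
#if 0
static inline void
example_trim_fcs(struct mbuf *prev, struct mbuf *last)
{

	if (last->m_len < ETHER_CRC_LEN) {
		prev->m_len -= ETHER_CRC_LEN - last->m_len;
		last->m_len = 0;
	} else
		last->m_len -= ETHER_CRC_LEN;
}
#endif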
   7113 
   7114 /*
   7115  * wm_linkintr_gmii:
   7116  *
   7117  *	Helper; handle link interrupts for GMII.
   7118  */
   7119 static void
   7120 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7121 {
   7122 
   7123 	KASSERT(WM_CORE_LOCKED(sc));
   7124 
   7125 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7126 		__func__));
   7127 
   7128 	if (icr & ICR_LSC) {
   7129 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7130 
   7131 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7132 			wm_gig_downshift_workaround_ich8lan(sc);
   7133 
   7134 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7135 			device_xname(sc->sc_dev)));
   7136 		mii_pollstat(&sc->sc_mii);
   7137 		if (sc->sc_type == WM_T_82543) {
   7138 			int miistatus, active;
   7139 
   7140 			/*
   7141 			 * With 82543, we need to force speed and
   7142 			 * duplex on the MAC equal to what the PHY
   7143 			 * speed and duplex configuration is.
   7144 			 */
   7145 			miistatus = sc->sc_mii.mii_media_status;
   7146 
   7147 			if (miistatus & IFM_ACTIVE) {
   7148 				active = sc->sc_mii.mii_media_active;
   7149 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7150 				switch (IFM_SUBTYPE(active)) {
   7151 				case IFM_10_T:
   7152 					sc->sc_ctrl |= CTRL_SPEED_10;
   7153 					break;
   7154 				case IFM_100_TX:
   7155 					sc->sc_ctrl |= CTRL_SPEED_100;
   7156 					break;
   7157 				case IFM_1000_T:
   7158 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7159 					break;
   7160 				default:
   7161 					/*
   7162 					 * fiber?
    7163 					 * Fiber?
    7164 					 * Should not get here.
   7165 					printf("unknown media (%x)\n",
   7166 					    active);
   7167 					break;
   7168 				}
   7169 				if (active & IFM_FDX)
   7170 					sc->sc_ctrl |= CTRL_FD;
   7171 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7172 			}
   7173 		} else if ((sc->sc_type == WM_T_ICH8)
   7174 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7175 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7176 		} else if (sc->sc_type == WM_T_PCH) {
   7177 			wm_k1_gig_workaround_hv(sc,
   7178 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7179 		}
   7180 
   7181 		if ((sc->sc_phytype == WMPHY_82578)
   7182 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7183 			== IFM_1000_T)) {
   7184 
   7185 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7186 				delay(200*1000); /* XXX too big */
   7187 
   7188 				/* Link stall fix for link up */
   7189 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7190 				    HV_MUX_DATA_CTRL,
   7191 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7192 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7193 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7194 				    HV_MUX_DATA_CTRL,
   7195 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7196 			}
   7197 		}
   7198 	} else if (icr & ICR_RXSEQ) {
   7199 		DPRINTF(WM_DEBUG_LINK,
    7200 		    ("%s: LINK: Receive sequence error\n",
   7201 			device_xname(sc->sc_dev)));
   7202 	}
   7203 }
   7204 
   7205 /*
   7206  * wm_linkintr_tbi:
   7207  *
   7208  *	Helper; handle link interrupts for TBI mode.
   7209  */
   7210 static void
   7211 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7212 {
   7213 	uint32_t status;
   7214 
   7215 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7216 		__func__));
   7217 
   7218 	status = CSR_READ(sc, WMREG_STATUS);
   7219 	if (icr & ICR_LSC) {
   7220 		if (status & STATUS_LU) {
   7221 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7222 			    device_xname(sc->sc_dev),
   7223 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7224 			/*
   7225 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7226 			 * so we should update sc->sc_ctrl
   7227 			 */
   7228 
   7229 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7230 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7231 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7232 			if (status & STATUS_FD)
   7233 				sc->sc_tctl |=
   7234 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7235 			else
   7236 				sc->sc_tctl |=
   7237 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7238 			if (sc->sc_ctrl & CTRL_TFCE)
   7239 				sc->sc_fcrtl |= FCRTL_XONE;
   7240 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7241 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7242 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7243 				      sc->sc_fcrtl);
   7244 			sc->sc_tbi_linkup = 1;
   7245 		} else {
   7246 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7247 			    device_xname(sc->sc_dev)));
   7248 			sc->sc_tbi_linkup = 0;
   7249 		}
   7250 		/* Update LED */
   7251 		wm_tbi_serdes_set_linkled(sc);
   7252 	} else if (icr & ICR_RXSEQ) {
   7253 		DPRINTF(WM_DEBUG_LINK,
   7254 		    ("%s: LINK: Receive sequence error\n",
   7255 		    device_xname(sc->sc_dev)));
   7256 	}
   7257 }
   7258 
   7259 /*
   7260  * wm_linkintr_serdes:
   7261  *
    7262  *	Helper; handle link interrupts for SERDES mode.
   7263  */
   7264 static void
   7265 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7266 {
   7267 	struct mii_data *mii = &sc->sc_mii;
   7268 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7269 	uint32_t pcs_adv, pcs_lpab, reg;
   7270 
   7271 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7272 		__func__));
   7273 
   7274 	if (icr & ICR_LSC) {
   7275 		/* Check PCS */
   7276 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7277 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7278 			mii->mii_media_status |= IFM_ACTIVE;
   7279 			sc->sc_tbi_linkup = 1;
   7280 		} else {
   7281 			mii->mii_media_status |= IFM_NONE;
   7282 			sc->sc_tbi_linkup = 0;
   7283 			wm_tbi_serdes_set_linkled(sc);
   7284 			return;
   7285 		}
   7286 		mii->mii_media_active |= IFM_1000_SX;
   7287 		if ((reg & PCS_LSTS_FDX) != 0)
   7288 			mii->mii_media_active |= IFM_FDX;
   7289 		else
   7290 			mii->mii_media_active |= IFM_HDX;
   7291 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7292 			/* Check flow */
   7293 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7294 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7295 				DPRINTF(WM_DEBUG_LINK,
   7296 				    ("XXX LINKOK but not ACOMP\n"));
   7297 				return;
   7298 			}
   7299 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7300 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7301 			DPRINTF(WM_DEBUG_LINK,
   7302 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7303 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7304 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7305 				mii->mii_media_active |= IFM_FLOW
   7306 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7307 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7308 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7309 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7310 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7311 				mii->mii_media_active |= IFM_FLOW
   7312 				    | IFM_ETH_TXPAUSE;
   7313 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7314 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7315 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7316 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7317 				mii->mii_media_active |= IFM_FLOW
   7318 				    | IFM_ETH_RXPAUSE;
   7319 		}
   7320 		/* Update LED */
   7321 		wm_tbi_serdes_set_linkled(sc);
   7322 	} else {
   7323 		DPRINTF(WM_DEBUG_LINK,
   7324 		    ("%s: LINK: Receive sequence error\n",
   7325 		    device_xname(sc->sc_dev)));
   7326 	}
   7327 }
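
/*
 * Illustrative sketch: the pause resolution above follows the usual
 * 802.3 autonegotiation priority table.  Symmetric pause on both
 * sides yields TX+RX pause; otherwise a matching asymmetric
 * advertisement yields pause in one direction only (hypothetical
 * helper mirroring the code above).
 */
#if 0
static inline int
example_resolve_flow(uint32_t adv, uint32_t lpab)
{

	if ((adv & TXCW_SYM_PAUSE) && (lpab & TXCW_SYM_PAUSE))
		return IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	if (((adv & TXCW_SYM_PAUSE) == 0) && (adv & TXCW_ASYM_PAUSE)
	    && (lpab & TXCW_SYM_PAUSE) && (lpab & TXCW_ASYM_PAUSE))
		return IFM_FLOW | IFM_ETH_TXPAUSE;
	if ((adv & TXCW_SYM_PAUSE) && (adv & TXCW_ASYM_PAUSE)
	    && ((lpab & TXCW_SYM_PAUSE) == 0) && (lpab & TXCW_ASYM_PAUSE))
		return IFM_FLOW | IFM_ETH_RXPAUSE;
	return 0;
}
#endif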
   7328 
   7329 /*
   7330  * wm_linkintr:
   7331  *
   7332  *	Helper; handle link interrupts.
   7333  */
   7334 static void
   7335 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7336 {
   7337 
   7338 	KASSERT(WM_CORE_LOCKED(sc));
   7339 
   7340 	if (sc->sc_flags & WM_F_HAS_MII)
   7341 		wm_linkintr_gmii(sc, icr);
   7342 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7343 	    && (sc->sc_type >= WM_T_82575))
   7344 		wm_linkintr_serdes(sc, icr);
   7345 	else
   7346 		wm_linkintr_tbi(sc, icr);
   7347 }
   7348 
   7349 /*
   7350  * wm_intr_legacy:
   7351  *
   7352  *	Interrupt service routine for INTx and MSI.
   7353  */
   7354 static int
   7355 wm_intr_legacy(void *arg)
   7356 {
   7357 	struct wm_softc *sc = arg;
   7358 	struct wm_txqueue *txq = &sc->sc_txq[0];
   7359 	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
   7360 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7361 	uint32_t icr, rndval = 0;
   7362 	int handled = 0;
   7363 
   7364 	DPRINTF(WM_DEBUG_TX,
   7365 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7366 	while (1 /* CONSTCOND */) {
   7367 		icr = CSR_READ(sc, WMREG_ICR);
   7368 		if ((icr & sc->sc_icr) == 0)
   7369 			break;
   7370 		if (rndval == 0)
   7371 			rndval = icr;
   7372 
   7373 		WM_RX_LOCK(rxq);
   7374 
   7375 		if (sc->sc_stopping) {
   7376 			WM_RX_UNLOCK(rxq);
   7377 			break;
   7378 		}
   7379 
   7380 		handled = 1;
   7381 
   7382 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7383 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   7384 			DPRINTF(WM_DEBUG_RX,
   7385 			    ("%s: RX: got Rx intr 0x%08x\n",
   7386 			    device_xname(sc->sc_dev),
   7387 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   7388 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7389 		}
   7390 #endif
   7391 		wm_rxeof(rxq);
   7392 
   7393 		WM_RX_UNLOCK(rxq);
   7394 		WM_TX_LOCK(txq);
   7395 
   7396 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7397 		if (icr & ICR_TXDW) {
   7398 			DPRINTF(WM_DEBUG_TX,
   7399 			    ("%s: TX: got TXDW interrupt\n",
   7400 			    device_xname(sc->sc_dev)));
   7401 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7402 		}
   7403 #endif
   7404 		wm_txeof(sc);
   7405 
   7406 		WM_TX_UNLOCK(txq);
   7407 		WM_CORE_LOCK(sc);
   7408 
   7409 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
   7410 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7411 			wm_linkintr(sc, icr);
   7412 		}
   7413 
   7414 		WM_CORE_UNLOCK(sc);
   7415 
   7416 		if (icr & ICR_RXO) {
   7417 #if defined(WM_DEBUG)
   7418 			log(LOG_WARNING, "%s: Receive overrun\n",
   7419 			    device_xname(sc->sc_dev));
   7420 #endif /* defined(WM_DEBUG) */
   7421 		}
   7422 	}
   7423 
   7424 	rnd_add_uint32(&sc->rnd_source, rndval);
   7425 
   7426 	if (handled) {
   7427 		/* Try to get more packets going. */
   7428 		ifp->if_start(ifp);
   7429 	}
   7430 
   7431 	return handled;
   7432 }
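
/*
 * Illustrative sketch: on these chips, reading ICR acknowledges
 * (clears) the asserted interrupt causes, so the legacy handler
 * above simply re-reads ICR until none of the causes it cares
 * about remain set.  Condensed form:
 */
#if 0
	for (;;) {
		icr = CSR_READ(sc, WMREG_ICR);	/* the read also acks */
		if ((icr & sc->sc_icr) == 0)
			break;
		/* ... service Rx, Tx and link causes ... */
	}
#endif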
   7433 
   7434 /*
   7435  * wm_txintr_msix:
   7436  *
   7437  *	Interrupt service routine for TX complete interrupt for MSI-X.
   7438  */
   7439 static int
   7440 wm_txintr_msix(void *arg)
   7441 {
   7442 	struct wm_txqueue *txq = arg;
   7443 	struct wm_softc *sc = txq->txq_sc;
   7444 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7445 	int handled = 0;
   7446 
   7447 	DPRINTF(WM_DEBUG_TX,
   7448 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7449 
   7450 	if (sc->sc_type == WM_T_82574)
   7451 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id)); /* 82574 only */
   7452 	else if (sc->sc_type == WM_T_82575)
   7453 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
   7454 	else
   7455 		CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
   7456 
   7457 	WM_TX_LOCK(txq);
   7458 
   7459 	if (sc->sc_stopping)
   7460 		goto out;
   7461 
   7462 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7463 	handled = wm_txeof(sc);
   7464 
   7465 out:
   7466 	WM_TX_UNLOCK(txq);
   7467 
   7468 	if (sc->sc_type == WM_T_82574)
   7469 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id)); /* 82574 only */
   7470 	else if (sc->sc_type == WM_T_82575)
   7471 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
   7472 	else
   7473 		CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
   7474 
   7475 	if (handled) {
   7476 		/* Try to get more packets going. */
   7477 		ifp->if_start(ifp);
   7478 	}
   7479 
   7480 	return handled;
   7481 }
   7482 
   7483 /*
   7484  * wm_rxintr_msix:
   7485  *
   7486  *	Interrupt service routine for RX interrupt for MSI-X.
   7487  */
   7488 static int
   7489 wm_rxintr_msix(void *arg)
   7490 {
   7491 	struct wm_rxqueue *rxq = arg;
   7492 	struct wm_softc *sc = rxq->rxq_sc;
   7493 
   7494 	DPRINTF(WM_DEBUG_RX,
   7495 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7496 
   7497 	if (sc->sc_type == WM_T_82574)
   7498 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id)); /* 82574 only */
   7499 	else if (sc->sc_type == WM_T_82575)
   7500 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
   7501 	else
   7502 		CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
   7503 
   7504 	WM_RX_LOCK(rxq);
   7505 
   7506 	if (sc->sc_stopping)
   7507 		goto out;
   7508 
   7509 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7510 	wm_rxeof(rxq);
   7511 
   7512 out:
   7513 	WM_RX_UNLOCK(rxq);
   7514 
   7515 	if (sc->sc_type == WM_T_82574)
   7516 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
   7517 	else if (sc->sc_type == WM_T_82575)
   7518 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
   7519 	else
   7520 		CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
   7521 
   7522 	return 1;
   7523 }
   7524 
   7525 /*
   7526  * wm_linkintr_msix:
   7527  *
   7528  *	Interrupt service routine for link status change for MSI-X.
   7529  */
   7530 static int
   7531 wm_linkintr_msix(void *arg)
   7532 {
   7533 	struct wm_softc *sc = arg;
   7534 	uint32_t reg;
   7535 
   7536 	DPRINTF(WM_DEBUG_LINK,
   7537 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7538 
   7539 	reg = CSR_READ(sc, WMREG_ICR);
   7540 	WM_CORE_LOCK(sc);
   7541 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7542 		goto out;
   7543 
   7544 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7545 	wm_linkintr(sc, ICR_LSC);
   7546 
   7547 out:
   7548 	WM_CORE_UNLOCK(sc);
   7549 
   7550 	if (sc->sc_type == WM_T_82574)
   7551 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
   7552 	else if (sc->sc_type == WM_T_82575)
   7553 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7554 	else
   7555 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7556 
   7557 	return 1;
   7558 }
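
/*
 * Illustrative sketch: the three MSI-X handlers above share one
 * shape -- mask this vector's cause, service it under the relevant
 * lock, then unmask.  On the 82574 that is an IMC/IMS write, on the
 * 82575 an EIMC/EIMS queue bit, and on later chips a per-vector
 * EIMC/EIMS bit.  Hypothetical helpers stand in for the CSR writes:
 */
#if 0
static int
example_msix_handler(void *arg)
{

	example_mask_vector(arg);	/* EIMC (or IMC) write */
	example_lock(arg);
	if (!example_stopping(arg))
		example_service(arg);
	example_unlock(arg);
	example_unmask_vector(arg);	/* EIMS (or IMS) write */
	return 1;
}
#endif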
   7559 
   7560 /*
   7561  * Media related.
   7562  * GMII, SGMII, TBI (and SERDES)
   7563  */
   7564 
   7565 /* Common */
   7566 
   7567 /*
   7568  * wm_tbi_serdes_set_linkled:
   7569  *
   7570  *	Update the link LED on TBI and SERDES devices.
   7571  */
   7572 static void
   7573 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7574 {
   7575 
   7576 	if (sc->sc_tbi_linkup)
   7577 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7578 	else
   7579 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7580 
   7581 	/* 82540 or newer devices are active low */
   7582 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7583 
   7584 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7585 }
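
/*
 * Illustrative sketch: on 82540 and newer parts the LED pin is
 * active low, so the code above sets the bit for "link up" and then
 * XORs it back out, flipping the pin's sense without a branch
 * (hypothetical helper).
 */
#if 0
static inline uint32_t
example_led_bit(bool linkup, bool active_low)
{
	uint32_t bit = linkup ? CTRL_SWDPIN(0) : 0;

	/* The XOR inverts the pin's sense on active-low parts. */
	return active_low ? (bit ^ CTRL_SWDPIN(0)) : bit;
}
#endif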
   7586 
   7587 /* GMII related */
   7588 
   7589 /*
   7590  * wm_gmii_reset:
   7591  *
   7592  *	Reset the PHY.
   7593  */
   7594 static void
   7595 wm_gmii_reset(struct wm_softc *sc)
   7596 {
   7597 	uint32_t reg;
   7598 	int rv;
   7599 
   7600 	/* get phy semaphore */
   7601 	switch (sc->sc_type) {
   7602 	case WM_T_82571:
   7603 	case WM_T_82572:
   7604 	case WM_T_82573:
   7605 	case WM_T_82574:
   7606 	case WM_T_82583:
   7607 		 /* XXX should get sw semaphore, too */
   7608 		rv = wm_get_swsm_semaphore(sc);
   7609 		break;
   7610 	case WM_T_82575:
   7611 	case WM_T_82576:
   7612 	case WM_T_82580:
   7613 	case WM_T_I350:
   7614 	case WM_T_I354:
   7615 	case WM_T_I210:
   7616 	case WM_T_I211:
   7617 	case WM_T_80003:
   7618 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7619 		break;
   7620 	case WM_T_ICH8:
   7621 	case WM_T_ICH9:
   7622 	case WM_T_ICH10:
   7623 	case WM_T_PCH:
   7624 	case WM_T_PCH2:
   7625 	case WM_T_PCH_LPT:
   7626 		rv = wm_get_swfwhw_semaphore(sc);
   7627 		break;
   7628 	default:
    7629 		/* Nothing to do. */
   7630 		rv = 0;
   7631 		break;
   7632 	}
   7633 	if (rv != 0) {
   7634 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7635 		    __func__);
   7636 		return;
   7637 	}
   7638 
   7639 	switch (sc->sc_type) {
   7640 	case WM_T_82542_2_0:
   7641 	case WM_T_82542_2_1:
   7642 		/* null */
   7643 		break;
   7644 	case WM_T_82543:
   7645 		/*
   7646 		 * With 82543, we need to force speed and duplex on the MAC
   7647 		 * equal to what the PHY speed and duplex configuration is.
   7648 		 * In addition, we need to perform a hardware reset on the PHY
   7649 		 * to take it out of reset.
   7650 		 */
   7651 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7652 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7653 
   7654 		/* The PHY reset pin is active-low. */
   7655 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7656 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7657 		    CTRL_EXT_SWDPIN(4));
   7658 		reg |= CTRL_EXT_SWDPIO(4);
   7659 
   7660 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7661 		CSR_WRITE_FLUSH(sc);
   7662 		delay(10*1000);
   7663 
   7664 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7665 		CSR_WRITE_FLUSH(sc);
   7666 		delay(150);
   7667 #if 0
   7668 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7669 #endif
   7670 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7671 		break;
   7672 	case WM_T_82544:	/* reset 10000us */
   7673 	case WM_T_82540:
   7674 	case WM_T_82545:
   7675 	case WM_T_82545_3:
   7676 	case WM_T_82546:
   7677 	case WM_T_82546_3:
   7678 	case WM_T_82541:
   7679 	case WM_T_82541_2:
   7680 	case WM_T_82547:
   7681 	case WM_T_82547_2:
   7682 	case WM_T_82571:	/* reset 100us */
   7683 	case WM_T_82572:
   7684 	case WM_T_82573:
   7685 	case WM_T_82574:
   7686 	case WM_T_82575:
   7687 	case WM_T_82576:
   7688 	case WM_T_82580:
   7689 	case WM_T_I350:
   7690 	case WM_T_I354:
   7691 	case WM_T_I210:
   7692 	case WM_T_I211:
   7693 	case WM_T_82583:
   7694 	case WM_T_80003:
   7695 		/* generic reset */
   7696 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7697 		CSR_WRITE_FLUSH(sc);
   7698 		delay(20000);
   7699 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7700 		CSR_WRITE_FLUSH(sc);
   7701 		delay(20000);
   7702 
   7703 		if ((sc->sc_type == WM_T_82541)
   7704 		    || (sc->sc_type == WM_T_82541_2)
   7705 		    || (sc->sc_type == WM_T_82547)
   7706 		    || (sc->sc_type == WM_T_82547_2)) {
    7707 			/* Workarounds for IGP are done in igp_reset() */
   7708 			/* XXX add code to set LED after phy reset */
   7709 		}
   7710 		break;
   7711 	case WM_T_ICH8:
   7712 	case WM_T_ICH9:
   7713 	case WM_T_ICH10:
   7714 	case WM_T_PCH:
   7715 	case WM_T_PCH2:
   7716 	case WM_T_PCH_LPT:
   7717 		/* generic reset */
   7718 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7719 		CSR_WRITE_FLUSH(sc);
   7720 		delay(100);
   7721 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7722 		CSR_WRITE_FLUSH(sc);
   7723 		delay(150);
   7724 		break;
   7725 	default:
   7726 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7727 		    __func__);
   7728 		break;
   7729 	}
   7730 
   7731 	/* release PHY semaphore */
   7732 	switch (sc->sc_type) {
   7733 	case WM_T_82571:
   7734 	case WM_T_82572:
   7735 	case WM_T_82573:
   7736 	case WM_T_82574:
   7737 	case WM_T_82583:
   7738 		 /* XXX should put sw semaphore, too */
   7739 		wm_put_swsm_semaphore(sc);
   7740 		break;
   7741 	case WM_T_82575:
   7742 	case WM_T_82576:
   7743 	case WM_T_82580:
   7744 	case WM_T_I350:
   7745 	case WM_T_I354:
   7746 	case WM_T_I210:
   7747 	case WM_T_I211:
   7748 	case WM_T_80003:
   7749 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7750 		break;
   7751 	case WM_T_ICH8:
   7752 	case WM_T_ICH9:
   7753 	case WM_T_ICH10:
   7754 	case WM_T_PCH:
   7755 	case WM_T_PCH2:
   7756 	case WM_T_PCH_LPT:
   7757 		wm_put_swfwhw_semaphore(sc);
   7758 		break;
   7759 	default:
    7760 		/* Nothing to do. */
   7761 		rv = 0;
   7762 		break;
   7763 	}
   7764 
   7765 	/* get_cfg_done */
   7766 	wm_get_cfg_done(sc);
   7767 
   7768 	/* extra setup */
   7769 	switch (sc->sc_type) {
   7770 	case WM_T_82542_2_0:
   7771 	case WM_T_82542_2_1:
   7772 	case WM_T_82543:
   7773 	case WM_T_82544:
   7774 	case WM_T_82540:
   7775 	case WM_T_82545:
   7776 	case WM_T_82545_3:
   7777 	case WM_T_82546:
   7778 	case WM_T_82546_3:
   7779 	case WM_T_82541_2:
   7780 	case WM_T_82547_2:
   7781 	case WM_T_82571:
   7782 	case WM_T_82572:
   7783 	case WM_T_82573:
   7784 	case WM_T_82575:
   7785 	case WM_T_82576:
   7786 	case WM_T_82580:
   7787 	case WM_T_I350:
   7788 	case WM_T_I354:
   7789 	case WM_T_I210:
   7790 	case WM_T_I211:
   7791 	case WM_T_80003:
   7792 		/* null */
   7793 		break;
   7794 	case WM_T_82574:
   7795 	case WM_T_82583:
   7796 		wm_lplu_d0_disable(sc);
   7797 		break;
   7798 	case WM_T_82541:
   7799 	case WM_T_82547:
    7800 		/* XXX Configure the activity LED after PHY reset */
   7801 		break;
   7802 	case WM_T_ICH8:
   7803 	case WM_T_ICH9:
   7804 	case WM_T_ICH10:
   7805 	case WM_T_PCH:
   7806 	case WM_T_PCH2:
   7807 	case WM_T_PCH_LPT:
    7808 		/* Allow time for h/w to get to a quiescent state after reset */
   7809 		delay(10*1000);
   7810 
   7811 		if (sc->sc_type == WM_T_PCH)
   7812 			wm_hv_phy_workaround_ich8lan(sc);
   7813 
   7814 		if (sc->sc_type == WM_T_PCH2)
   7815 			wm_lv_phy_workaround_ich8lan(sc);
   7816 
   7817 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7818 			/*
   7819 			 * dummy read to clear the phy wakeup bit after lcd
   7820 			 * reset
   7821 			 */
   7822 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7823 		}
   7824 
   7825 		/*
    7826 		 * XXX Configure the LCD with the extended configuration
    7827 		 * region in NVM.
   7828 		 */
   7829 
   7830 		/* Disable D0 LPLU. */
   7831 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   7832 			wm_lplu_d0_disable_pch(sc);
   7833 		else
   7834 			wm_lplu_d0_disable(sc);	/* ICH* */
   7835 		break;
   7836 	default:
   7837 		panic("%s: unknown type\n", __func__);
   7838 		break;
   7839 	}
   7840 }
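
/*
 * Illustrative sketch: wm_gmii_reset() brackets the chip-specific
 * PHY reset with the matching acquire/release pair for the PHY
 * semaphore (SWSM, SWFW or SWFWHW, chosen by chip family).  The
 * overall shape, with hypothetical names:
 */
#if 0
	if (example_get_phy_semaphore(sc) != 0)
		return;				/* couldn't get it; bail */
	example_chip_specific_phy_reset(sc);
	example_put_phy_semaphore(sc);		/* always release */
	example_post_reset_fixups(sc);		/* get_cfg_done etc. */
#endif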
   7841 
   7842 /*
   7843  * wm_get_phy_id_82575:
   7844  *
   7845  * Return PHY ID. Return -1 if it failed.
   7846  */
   7847 static int
   7848 wm_get_phy_id_82575(struct wm_softc *sc)
   7849 {
   7850 	uint32_t reg;
   7851 	int phyid = -1;
   7852 
   7853 	/* XXX */
   7854 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   7855 		return -1;
   7856 
   7857 	if (wm_sgmii_uses_mdio(sc)) {
   7858 		switch (sc->sc_type) {
   7859 		case WM_T_82575:
   7860 		case WM_T_82576:
   7861 			reg = CSR_READ(sc, WMREG_MDIC);
   7862 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   7863 			break;
   7864 		case WM_T_82580:
   7865 		case WM_T_I350:
   7866 		case WM_T_I354:
   7867 		case WM_T_I210:
   7868 		case WM_T_I211:
   7869 			reg = CSR_READ(sc, WMREG_MDICNFG);
   7870 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   7871 			break;
   7872 		default:
   7873 			return -1;
   7874 		}
   7875 	}
   7876 
   7877 	return phyid;
   7878 }
   7879 
   7880 
   7881 /*
   7882  * wm_gmii_mediainit:
   7883  *
   7884  *	Initialize media for use on 1000BASE-T devices.
   7885  */
   7886 static void
   7887 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   7888 {
   7889 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7890 	struct mii_data *mii = &sc->sc_mii;
   7891 	uint32_t reg;
   7892 
   7893 	/* We have GMII. */
   7894 	sc->sc_flags |= WM_F_HAS_MII;
   7895 
   7896 	if (sc->sc_type == WM_T_80003)
   7897 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   7898 	else
   7899 		sc->sc_tipg = TIPG_1000T_DFLT;
   7900 
   7901 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   7902 	if ((sc->sc_type == WM_T_82580)
   7903 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   7904 	    || (sc->sc_type == WM_T_I211)) {
   7905 		reg = CSR_READ(sc, WMREG_PHPM);
   7906 		reg &= ~PHPM_GO_LINK_D;
   7907 		CSR_WRITE(sc, WMREG_PHPM, reg);
   7908 	}
   7909 
   7910 	/*
   7911 	 * Let the chip set speed/duplex on its own based on
   7912 	 * signals from the PHY.
   7913 	 * XXXbouyer - I'm not sure this is right for the 80003,
   7914 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   7915 	 */
   7916 	sc->sc_ctrl |= CTRL_SLU;
   7917 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7918 
   7919 	/* Initialize our media structures and probe the GMII. */
   7920 	mii->mii_ifp = ifp;
   7921 
   7922 	/*
   7923 	 * Determine the PHY access method.
   7924 	 *
   7925 	 *  For SGMII, use SGMII specific method.
   7926 	 *
   7927 	 *  For some devices, we can determine the PHY access method
   7928 	 * from sc_type.
   7929 	 *
    7930 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    7931 	 * access method from sc_type, so use the PCI product ID for some
    7932 	 * devices.
    7933 	 *  For other ICH8 variants, try the IGP method first; if the PHY
    7934 	 * can't be detected, fall back to the BM method.
   7935 	 */
   7936 	switch (prodid) {
   7937 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   7938 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   7939 		/* 82577 */
   7940 		sc->sc_phytype = WMPHY_82577;
   7941 		break;
   7942 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   7943 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   7944 		/* 82578 */
   7945 		sc->sc_phytype = WMPHY_82578;
   7946 		break;
   7947 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   7948 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   7949 		/* 82579 */
   7950 		sc->sc_phytype = WMPHY_82579;
   7951 		break;
   7952 	case PCI_PRODUCT_INTEL_82801I_BM:
   7953 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   7954 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   7955 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   7956 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   7957 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   7958 		/* 82567 */
   7959 		sc->sc_phytype = WMPHY_BM;
   7960 		mii->mii_readreg = wm_gmii_bm_readreg;
   7961 		mii->mii_writereg = wm_gmii_bm_writereg;
   7962 		break;
   7963 	default:
   7964 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   7965 		    && !wm_sgmii_uses_mdio(sc)){
   7966 			/* SGMII */
   7967 			mii->mii_readreg = wm_sgmii_readreg;
   7968 			mii->mii_writereg = wm_sgmii_writereg;
   7969 		} else if (sc->sc_type >= WM_T_80003) {
   7970 			/* 80003 */
   7971 			mii->mii_readreg = wm_gmii_i80003_readreg;
   7972 			mii->mii_writereg = wm_gmii_i80003_writereg;
   7973 		} else if (sc->sc_type >= WM_T_I210) {
   7974 			/* I210 and I211 */
   7975 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   7976 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   7977 		} else if (sc->sc_type >= WM_T_82580) {
   7978 			/* 82580, I350 and I354 */
   7979 			sc->sc_phytype = WMPHY_82580;
   7980 			mii->mii_readreg = wm_gmii_82580_readreg;
   7981 			mii->mii_writereg = wm_gmii_82580_writereg;
   7982 		} else if (sc->sc_type >= WM_T_82544) {
    7983 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   7984 			mii->mii_readreg = wm_gmii_i82544_readreg;
   7985 			mii->mii_writereg = wm_gmii_i82544_writereg;
   7986 		} else {
   7987 			mii->mii_readreg = wm_gmii_i82543_readreg;
   7988 			mii->mii_writereg = wm_gmii_i82543_writereg;
   7989 		}
   7990 		break;
   7991 	}
   7992 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
   7993 		/* All PCH* use _hv_ */
   7994 		mii->mii_readreg = wm_gmii_hv_readreg;
   7995 		mii->mii_writereg = wm_gmii_hv_writereg;
   7996 	}
   7997 	mii->mii_statchg = wm_gmii_statchg;
   7998 
   7999 	wm_gmii_reset(sc);
   8000 
   8001 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8002 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8003 	    wm_gmii_mediastatus);
   8004 
   8005 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8006 	    || (sc->sc_type == WM_T_82580)
   8007 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8008 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8009 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8010 			/* Attach only one port */
   8011 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8012 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8013 		} else {
   8014 			int i, id;
   8015 			uint32_t ctrl_ext;
   8016 
   8017 			id = wm_get_phy_id_82575(sc);
   8018 			if (id != -1) {
   8019 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8020 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8021 			}
   8022 			if ((id == -1)
   8023 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    8024 				/* Power on the SGMII PHY if it is disabled */
   8025 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8026 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8027 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8028 				CSR_WRITE_FLUSH(sc);
   8029 				delay(300*1000); /* XXX too long */
   8030 
    8031 				/* Try PHY addresses 1 through 7 */
   8032 				for (i = 1; i < 8; i++)
   8033 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8034 					    0xffffffff, i, MII_OFFSET_ANY,
   8035 					    MIIF_DOPAUSE);
   8036 
   8037 				/* restore previous sfp cage power state */
   8038 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8039 			}
   8040 		}
   8041 	} else {
   8042 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8043 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8044 	}
   8045 
   8046 	/*
    8047 	 * If the MAC is PCH2 or PCH_LPT and no MII PHY was detected, call
    8048 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   8049 	 */
   8050 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8051 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8052 		wm_set_mdio_slow_mode_hv(sc);
   8053 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8054 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8055 	}
   8056 
   8057 	/*
   8058 	 * (For ICH8 variants)
    8059 	 * If PHY detection failed, use the BM read/write functions and retry.
   8060 	 */
   8061 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8062 		/* if failed, retry with *_bm_* */
   8063 		mii->mii_readreg = wm_gmii_bm_readreg;
   8064 		mii->mii_writereg = wm_gmii_bm_writereg;
   8065 
   8066 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8067 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8068 	}
   8069 
   8070 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8071 		/* No PHY was found */
   8072 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   8073 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   8074 		sc->sc_phytype = WMPHY_NONE;
   8075 	} else {
   8076 		/*
   8077 		 * PHY Found!
   8078 		 * Check PHY type.
   8079 		 */
   8080 		uint32_t model;
   8081 		struct mii_softc *child;
   8082 
   8083 		child = LIST_FIRST(&mii->mii_phys);
   8084 		model = child->mii_mpd_model;
   8085 		if (model == MII_MODEL_yyINTEL_I82566)
   8086 			sc->sc_phytype = WMPHY_IGP_3;
   8087 
   8088 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8089 	}
   8090 }
   8091 
   8092 /*
   8093  * wm_gmii_mediachange:	[ifmedia interface function]
   8094  *
   8095  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8096  */
   8097 static int
   8098 wm_gmii_mediachange(struct ifnet *ifp)
   8099 {
   8100 	struct wm_softc *sc = ifp->if_softc;
   8101 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8102 	int rc;
   8103 
   8104 	if ((ifp->if_flags & IFF_UP) == 0)
   8105 		return 0;
   8106 
   8107 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8108 	sc->sc_ctrl |= CTRL_SLU;
   8109 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8110 	    || (sc->sc_type > WM_T_82543)) {
   8111 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8112 	} else {
   8113 		sc->sc_ctrl &= ~CTRL_ASDE;
   8114 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8115 		if (ife->ifm_media & IFM_FDX)
   8116 			sc->sc_ctrl |= CTRL_FD;
   8117 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8118 		case IFM_10_T:
   8119 			sc->sc_ctrl |= CTRL_SPEED_10;
   8120 			break;
   8121 		case IFM_100_TX:
   8122 			sc->sc_ctrl |= CTRL_SPEED_100;
   8123 			break;
   8124 		case IFM_1000_T:
   8125 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8126 			break;
   8127 		default:
   8128 			panic("wm_gmii_mediachange: bad media 0x%x",
   8129 			    ife->ifm_media);
   8130 		}
   8131 	}
   8132 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8133 	if (sc->sc_type <= WM_T_82543)
   8134 		wm_gmii_reset(sc);
   8135 
   8136 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8137 		return 0;
   8138 	return rc;
   8139 }
   8140 
   8141 /*
   8142  * wm_gmii_mediastatus:	[ifmedia interface function]
   8143  *
   8144  *	Get the current interface media status on a 1000BASE-T device.
   8145  */
   8146 static void
   8147 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8148 {
   8149 	struct wm_softc *sc = ifp->if_softc;
   8150 
   8151 	ether_mediastatus(ifp, ifmr);
   8152 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8153 	    | sc->sc_flowflags;
   8154 }
   8155 
   8156 #define	MDI_IO		CTRL_SWDPIN(2)
   8157 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8158 #define	MDI_CLK		CTRL_SWDPIN(3)
   8159 
   8160 static void
   8161 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8162 {
   8163 	uint32_t i, v;
   8164 
   8165 	v = CSR_READ(sc, WMREG_CTRL);
   8166 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8167 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8168 
   8169 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8170 		if (data & i)
   8171 			v |= MDI_IO;
   8172 		else
   8173 			v &= ~MDI_IO;
   8174 		CSR_WRITE(sc, WMREG_CTRL, v);
   8175 		CSR_WRITE_FLUSH(sc);
   8176 		delay(10);
   8177 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8178 		CSR_WRITE_FLUSH(sc);
   8179 		delay(10);
   8180 		CSR_WRITE(sc, WMREG_CTRL, v);
   8181 		CSR_WRITE_FLUSH(sc);
   8182 		delay(10);
   8183 	}
   8184 }
   8185 
   8186 static uint32_t
   8187 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8188 {
   8189 	uint32_t v, i, data = 0;
   8190 
   8191 	v = CSR_READ(sc, WMREG_CTRL);
   8192 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8193 	v |= CTRL_SWDPIO(3);
   8194 
   8195 	CSR_WRITE(sc, WMREG_CTRL, v);
   8196 	CSR_WRITE_FLUSH(sc);
   8197 	delay(10);
   8198 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8199 	CSR_WRITE_FLUSH(sc);
   8200 	delay(10);
   8201 	CSR_WRITE(sc, WMREG_CTRL, v);
   8202 	CSR_WRITE_FLUSH(sc);
   8203 	delay(10);
   8204 
   8205 	for (i = 0; i < 16; i++) {
   8206 		data <<= 1;
   8207 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8208 		CSR_WRITE_FLUSH(sc);
   8209 		delay(10);
   8210 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8211 			data |= 1;
   8212 		CSR_WRITE(sc, WMREG_CTRL, v);
   8213 		CSR_WRITE_FLUSH(sc);
   8214 		delay(10);
   8215 	}
   8216 
   8217 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8218 	CSR_WRITE_FLUSH(sc);
   8219 	delay(10);
   8220 	CSR_WRITE(sc, WMREG_CTRL, v);
   8221 	CSR_WRITE_FLUSH(sc);
   8222 	delay(10);
   8223 
   8224 	return data;
   8225 }
   8226 
   8227 #undef MDI_IO
   8228 #undef MDI_DIR
   8229 #undef MDI_CLK
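
         /*
          * For reference, the two bit-bang helpers above implement a standard
          * IEEE 802.3 clause 22 MII management frame:
          *
          *	<PRE:32x1> <ST:01> <OP> <PHYAD:5> <REGAD:5> <TA> <DATA:16>
          *
          * where OP is 10 for a read and 01 for a write.  The readreg/writereg
          * functions below send the 32-bit preamble of ones via
          * wm_i82543_mii_sendbits(sc, 0xffffffffU, 32), then the header bits,
          * and either clock the 16 data bits back in with
          * wm_i82543_mii_recvbits() (read) or append them to the frame (write).
          */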
   8230 
   8231 /*
   8232  * wm_gmii_i82543_readreg:	[mii interface function]
   8233  *
   8234  *	Read a PHY register on the GMII (i82543 version).
   8235  */
   8236 static int
   8237 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8238 {
   8239 	struct wm_softc *sc = device_private(self);
   8240 	int rv;
   8241 
   8242 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8243 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8244 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8245 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8246 
   8247 	DPRINTF(WM_DEBUG_GMII,
   8248 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8249 	    device_xname(sc->sc_dev), phy, reg, rv));
   8250 
   8251 	return rv;
   8252 }
   8253 
   8254 /*
   8255  * wm_gmii_i82543_writereg:	[mii interface function]
   8256  *
   8257  *	Write a PHY register on the GMII (i82543 version).
   8258  */
   8259 static void
   8260 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8261 {
   8262 	struct wm_softc *sc = device_private(self);
   8263 
   8264 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8265 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8266 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8267 	    (MII_COMMAND_START << 30), 32);
   8268 }
   8269 
   8270 /*
   8271  * wm_gmii_i82544_readreg:	[mii interface function]
   8272  *
   8273  *	Read a PHY register on the GMII.
   8274  */
   8275 static int
   8276 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8277 {
   8278 	struct wm_softc *sc = device_private(self);
   8279 	uint32_t mdic = 0;
   8280 	int i, rv;
   8281 
   8282 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8283 	    MDIC_REGADD(reg));
   8284 
   8285 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8286 		mdic = CSR_READ(sc, WMREG_MDIC);
   8287 		if (mdic & MDIC_READY)
   8288 			break;
   8289 		delay(50);
   8290 	}
   8291 
   8292 	if ((mdic & MDIC_READY) == 0) {
   8293 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8294 		    device_xname(sc->sc_dev), phy, reg);
   8295 		rv = 0;
   8296 	} else if (mdic & MDIC_E) {
   8297 #if 0 /* This is normal if no PHY is present. */
   8298 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8299 		    device_xname(sc->sc_dev), phy, reg);
   8300 #endif
   8301 		rv = 0;
   8302 	} else {
   8303 		rv = MDIC_DATA(mdic);
   8304 		if (rv == 0xffff)
   8305 			rv = 0;
   8306 	}
   8307 
   8308 	return rv;
   8309 }
   8310 
   8311 /*
   8312  * wm_gmii_i82544_writereg:	[mii interface function]
   8313  *
   8314  *	Write a PHY register on the GMII.
   8315  */
   8316 static void
   8317 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8318 {
   8319 	struct wm_softc *sc = device_private(self);
   8320 	uint32_t mdic = 0;
   8321 	int i;
   8322 
   8323 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8324 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8325 
   8326 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8327 		mdic = CSR_READ(sc, WMREG_MDIC);
   8328 		if (mdic & MDIC_READY)
   8329 			break;
   8330 		delay(50);
   8331 	}
   8332 
   8333 	if ((mdic & MDIC_READY) == 0)
   8334 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8335 		    device_xname(sc->sc_dev), phy, reg);
   8336 	else if (mdic & MDIC_E)
   8337 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8338 		    device_xname(sc->sc_dev), phy, reg);
   8339 }
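
         /*
          * Note that both MDIC accessors above poll MDIC_READY once every
          * 50us, so the effective timeout is WM_GEN_POLL_TIMEOUT * 3 * 50us
          * per access.
          */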
   8340 
   8341 /*
   8342  * wm_gmii_i80003_readreg:	[mii interface function]
   8343  *
    8344  *	Read a PHY register on the kumeran interface.
    8345  * This could be handled by the PHY layer if we didn't have to lock the
    8346  * resource ...
   8347  */
   8348 static int
   8349 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8350 {
   8351 	struct wm_softc *sc = device_private(self);
   8352 	int sem;
   8353 	int rv;
   8354 
   8355 	if (phy != 1) /* only one PHY on kumeran bus */
   8356 		return 0;
   8357 
   8358 	sem = swfwphysem[sc->sc_funcid];
   8359 	if (wm_get_swfw_semaphore(sc, sem)) {
   8360 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8361 		    __func__);
   8362 		return 0;
   8363 	}
   8364 
   8365 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8366 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8367 		    reg >> GG82563_PAGE_SHIFT);
   8368 	} else {
   8369 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8370 		    reg >> GG82563_PAGE_SHIFT);
   8371 	}
    8372 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8373 	delay(200);
   8374 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8375 	delay(200);
   8376 
   8377 	wm_put_swfw_semaphore(sc, sem);
   8378 	return rv;
   8379 }
   8380 
   8381 /*
   8382  * wm_gmii_i80003_writereg:	[mii interface function]
   8383  *
    8384  *	Write a PHY register on the kumeran interface.
    8385  * This could be handled by the PHY layer if we didn't have to lock the
    8386  * resource ...
   8387  */
   8388 static void
   8389 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8390 {
   8391 	struct wm_softc *sc = device_private(self);
   8392 	int sem;
   8393 
   8394 	if (phy != 1) /* only one PHY on kumeran bus */
   8395 		return;
   8396 
   8397 	sem = swfwphysem[sc->sc_funcid];
   8398 	if (wm_get_swfw_semaphore(sc, sem)) {
   8399 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8400 		    __func__);
   8401 		return;
   8402 	}
   8403 
   8404 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8405 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8406 		    reg >> GG82563_PAGE_SHIFT);
   8407 	} else {
   8408 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8409 		    reg >> GG82563_PAGE_SHIFT);
   8410 	}
    8411 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8412 	delay(200);
   8413 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8414 	delay(200);
   8415 
   8416 	wm_put_swfw_semaphore(sc, sem);
   8417 }
   8418 
   8419 /*
   8420  * wm_gmii_bm_readreg:	[mii interface function]
   8421  *
    8422  *	Read a PHY register on the BM PHY (82567).
    8423  * This could be handled by the PHY layer if we didn't have to lock the
    8424  * resource ...
   8425  */
   8426 static int
   8427 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8428 {
   8429 	struct wm_softc *sc = device_private(self);
   8430 	int sem;
   8431 	int rv;
   8432 
   8433 	sem = swfwphysem[sc->sc_funcid];
   8434 	if (wm_get_swfw_semaphore(sc, sem)) {
   8435 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8436 		    __func__);
   8437 		return 0;
   8438 	}
   8439 
   8440 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8441 		if (phy == 1)
   8442 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8443 			    reg);
   8444 		else
   8445 			wm_gmii_i82544_writereg(self, phy,
   8446 			    GG82563_PHY_PAGE_SELECT,
   8447 			    reg >> GG82563_PAGE_SHIFT);
   8448 	}
   8449 
   8450 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8451 	wm_put_swfw_semaphore(sc, sem);
   8452 	return rv;
   8453 }
   8454 
   8455 /*
   8456  * wm_gmii_bm_writereg:	[mii interface function]
   8457  *
    8458  *	Write a PHY register on the BM PHY (82567).
    8459  * This could be handled by the PHY layer if we didn't have to lock the
    8460  * resource ...
   8461  */
   8462 static void
   8463 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8464 {
   8465 	struct wm_softc *sc = device_private(self);
   8466 	int sem;
   8467 
   8468 	sem = swfwphysem[sc->sc_funcid];
   8469 	if (wm_get_swfw_semaphore(sc, sem)) {
   8470 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8471 		    __func__);
   8472 		return;
   8473 	}
   8474 
   8475 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8476 		if (phy == 1)
   8477 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8478 			    reg);
   8479 		else
   8480 			wm_gmii_i82544_writereg(self, phy,
   8481 			    GG82563_PHY_PAGE_SELECT,
   8482 			    reg >> GG82563_PAGE_SHIFT);
   8483 	}
   8484 
   8485 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8486 	wm_put_swfw_semaphore(sc, sem);
   8487 }
   8488 
   8489 static void
   8490 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8491 {
   8492 	struct wm_softc *sc = device_private(self);
   8493 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8494 	uint16_t wuce;
   8495 
   8496 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8497 	if (sc->sc_type == WM_T_PCH) {
    8498 		/* XXX The e1000 driver does nothing here... why? */
   8499 	}
   8500 
   8501 	/* Set page 769 */
   8502 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8503 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8504 
   8505 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8506 
   8507 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8508 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8509 	    wuce | BM_WUC_ENABLE_BIT);
   8510 
   8511 	/* Select page 800 */
   8512 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8513 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8514 
   8515 	/* Write page 800 */
   8516 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8517 
   8518 	if (rd)
   8519 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8520 	else
   8521 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8522 
   8523 	/* Set page 769 */
   8524 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8525 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8526 
   8527 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8528 }
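
         /*
          * To summarize the sequence above: the wakeup (page 800) registers
          * cannot be reached with a plain page select.  The accessor selects
          * page 769 and sets the enable bit in BM_WUC_ENABLE_REG, selects
          * page 800 and reads or writes the target register indirectly via
          * the address/data opcode registers, and finally restores the
          * original page-769 enable state.
          */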
   8529 
   8530 /*
   8531  * wm_gmii_hv_readreg:	[mii interface function]
   8532  *
    8533  *	Read a PHY register on the HV (PCH family) PHY.
    8534  * This could be handled by the PHY layer if we didn't have to lock the
    8535  * resource ...
   8536  */
   8537 static int
   8538 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8539 {
   8540 	struct wm_softc *sc = device_private(self);
   8541 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8542 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8543 	uint16_t val;
   8544 	int rv;
   8545 
   8546 	if (wm_get_swfwhw_semaphore(sc)) {
   8547 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8548 		    __func__);
   8549 		return 0;
   8550 	}
   8551 
   8552 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8553 	if (sc->sc_phytype == WMPHY_82577) {
   8554 		/* XXX must write */
   8555 	}
   8556 
    8557 	/* Page 800 works differently from the rest, so it has its own func */
    8558 	if (page == BM_WUC_PAGE) {
    8559 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
         		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
    8560 		return val;
    8561 	}
    8562 
    8563 	/*
    8564 	 * Pages below 768 work differently from the rest and are not
    8565 	 * handled yet.
    8566 	 */
    8567 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8568 		printf("gmii_hv_readreg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8569 		return 0;
    8570 	}
   8571 
   8572 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8573 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8574 		    page << BME1000_PAGE_SHIFT);
   8575 	}
   8576 
   8577 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8578 	wm_put_swfwhw_semaphore(sc);
   8579 	return rv;
   8580 }
   8581 
   8582 /*
   8583  * wm_gmii_hv_writereg:	[mii interface function]
   8584  *
    8585  *	Write a PHY register on the HV (PCH family) PHY.
    8586  * This could be handled by the PHY layer if we didn't have to lock the
    8587  * resource ...
   8588  */
   8589 static void
   8590 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8591 {
   8592 	struct wm_softc *sc = device_private(self);
   8593 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8594 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8595 
   8596 	if (wm_get_swfwhw_semaphore(sc)) {
   8597 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8598 		    __func__);
   8599 		return;
   8600 	}
   8601 
   8602 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8603 
    8604 	/* Page 800 works differently from the rest, so it has its own func */
    8605 	if (page == BM_WUC_PAGE) {
    8606 		uint16_t tmp;
    8607 
    8608 		tmp = val;
    8609 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
         		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
    8610 		return;
    8611 	}
    8612 
    8613 	/*
    8614 	 * Pages below 768 work differently from the rest and are not
    8615 	 * handled yet.
    8616 	 */
    8617 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8618 		printf("gmii_hv_writereg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8619 		return;
    8620 	}
   8621 
   8622 	/*
   8623 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8624 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8625 	 */
   8626 
   8627 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8628 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8629 		    page << BME1000_PAGE_SHIFT);
   8630 	}
   8631 
   8632 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8633 	wm_put_swfwhw_semaphore(sc);
   8634 }
   8635 
   8636 /*
   8637  * wm_gmii_82580_readreg:	[mii interface function]
   8638  *
    8639  *	Read a PHY register on the 82580, I350 and I354.
    8640  * This could be handled by the PHY layer if we didn't have to lock the
    8641  * resource ...
   8642  */
   8643 static int
   8644 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8645 {
   8646 	struct wm_softc *sc = device_private(self);
   8647 	int sem;
   8648 	int rv;
   8649 
   8650 	sem = swfwphysem[sc->sc_funcid];
   8651 	if (wm_get_swfw_semaphore(sc, sem)) {
   8652 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8653 		    __func__);
   8654 		return 0;
   8655 	}
   8656 
   8657 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8658 
   8659 	wm_put_swfw_semaphore(sc, sem);
   8660 	return rv;
   8661 }
   8662 
   8663 /*
   8664  * wm_gmii_82580_writereg:	[mii interface function]
   8665  *
    8666  *	Write a PHY register on the 82580, I350 and I354.
    8667  * This could be handled by the PHY layer if we didn't have to lock the
    8668  * resource ...
   8669  */
   8670 static void
   8671 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8672 {
   8673 	struct wm_softc *sc = device_private(self);
   8674 	int sem;
   8675 
   8676 	sem = swfwphysem[sc->sc_funcid];
   8677 	if (wm_get_swfw_semaphore(sc, sem)) {
   8678 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8679 		    __func__);
   8680 		return;
   8681 	}
   8682 
   8683 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8684 
   8685 	wm_put_swfw_semaphore(sc, sem);
   8686 }
   8687 
   8688 /*
   8689  * wm_gmii_gs40g_readreg:	[mii interface function]
   8690  *
    8691  *	Read a PHY register on the I210 and I211.
    8692  * This could be handled by the PHY layer if we didn't have to lock the
    8693  * resource ...
   8694  */
   8695 static int
   8696 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8697 {
   8698 	struct wm_softc *sc = device_private(self);
   8699 	int sem;
   8700 	int page, offset;
   8701 	int rv;
   8702 
   8703 	/* Acquire semaphore */
   8704 	sem = swfwphysem[sc->sc_funcid];
   8705 	if (wm_get_swfw_semaphore(sc, sem)) {
   8706 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8707 		    __func__);
   8708 		return 0;
   8709 	}
   8710 
   8711 	/* Page select */
   8712 	page = reg >> GS40G_PAGE_SHIFT;
   8713 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8714 
   8715 	/* Read reg */
   8716 	offset = reg & GS40G_OFFSET_MASK;
   8717 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8718 
   8719 	wm_put_swfw_semaphore(sc, sem);
   8720 	return rv;
   8721 }
   8722 
   8723 /*
   8724  * wm_gmii_gs40g_writereg:	[mii interface function]
   8725  *
   8726  *	Write a PHY register on the I210 and I211.
   8727  * This could be handled by the PHY layer if we didn't have to lock the
    8728  * resource ...
   8729  */
   8730 static void
   8731 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8732 {
   8733 	struct wm_softc *sc = device_private(self);
   8734 	int sem;
   8735 	int page, offset;
   8736 
   8737 	/* Acquire semaphore */
   8738 	sem = swfwphysem[sc->sc_funcid];
   8739 	if (wm_get_swfw_semaphore(sc, sem)) {
   8740 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8741 		    __func__);
   8742 		return;
   8743 	}
   8744 
   8745 	/* Page select */
   8746 	page = reg >> GS40G_PAGE_SHIFT;
   8747 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8748 
   8749 	/* Write reg */
   8750 	offset = reg & GS40G_OFFSET_MASK;
   8751 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8752 
   8753 	/* Release semaphore */
   8754 	wm_put_swfw_semaphore(sc, sem);
   8755 }
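
         /*
          * In both GS40G accessors the "reg" argument packs the page number
          * in the bits above GS40G_PAGE_SHIFT and the register offset in the
          * bits covered by GS40G_OFFSET_MASK, so a single integer selects
          * both.
          */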
   8756 
   8757 /*
   8758  * wm_gmii_statchg:	[mii interface function]
   8759  *
   8760  *	Callback from MII layer when media changes.
   8761  */
   8762 static void
   8763 wm_gmii_statchg(struct ifnet *ifp)
   8764 {
   8765 	struct wm_softc *sc = ifp->if_softc;
   8766 	struct mii_data *mii = &sc->sc_mii;
   8767 
   8768 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8769 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8770 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8771 
   8772 	/*
   8773 	 * Get flow control negotiation result.
   8774 	 */
   8775 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8776 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8777 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8778 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8779 	}
   8780 
   8781 	if (sc->sc_flowflags & IFM_FLOW) {
   8782 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8783 			sc->sc_ctrl |= CTRL_TFCE;
   8784 			sc->sc_fcrtl |= FCRTL_XONE;
   8785 		}
   8786 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8787 			sc->sc_ctrl |= CTRL_RFCE;
   8788 	}
   8789 
   8790 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8791 		DPRINTF(WM_DEBUG_LINK,
   8792 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8793 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8794 	} else {
   8795 		DPRINTF(WM_DEBUG_LINK,
   8796 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8797 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8798 	}
   8799 
   8800 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8801 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8802 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8803 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8804 	if (sc->sc_type == WM_T_80003) {
   8805 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8806 		case IFM_1000_T:
   8807 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8808 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8809 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8810 			break;
   8811 		default:
   8812 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8813 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8814 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8815 			break;
   8816 		}
   8817 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8818 	}
   8819 }
   8820 
   8821 /*
   8822  * wm_kmrn_readreg:
   8823  *
   8824  *	Read a kumeran register
   8825  */
   8826 static int
   8827 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8828 {
   8829 	int rv;
   8830 
   8831 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8832 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8833 			aprint_error_dev(sc->sc_dev,
   8834 			    "%s: failed to get semaphore\n", __func__);
   8835 			return 0;
   8836 		}
   8837 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8838 		if (wm_get_swfwhw_semaphore(sc)) {
   8839 			aprint_error_dev(sc->sc_dev,
   8840 			    "%s: failed to get semaphore\n", __func__);
   8841 			return 0;
   8842 		}
   8843 	}
   8844 
   8845 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8846 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8847 	    KUMCTRLSTA_REN);
   8848 	CSR_WRITE_FLUSH(sc);
   8849 	delay(2);
   8850 
   8851 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   8852 
   8853 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8854 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8855 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8856 		wm_put_swfwhw_semaphore(sc);
   8857 
   8858 	return rv;
   8859 }
   8860 
   8861 /*
   8862  * wm_kmrn_writereg:
   8863  *
   8864  *	Write a kumeran register
   8865  */
   8866 static void
   8867 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   8868 {
   8869 
   8870 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8871 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8872 			aprint_error_dev(sc->sc_dev,
   8873 			    "%s: failed to get semaphore\n", __func__);
   8874 			return;
   8875 		}
   8876 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8877 		if (wm_get_swfwhw_semaphore(sc)) {
   8878 			aprint_error_dev(sc->sc_dev,
   8879 			    "%s: failed to get semaphore\n", __func__);
   8880 			return;
   8881 		}
   8882 	}
   8883 
   8884 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8885 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8886 	    (val & KUMCTRLSTA_MASK));
   8887 
   8888 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8889 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8890 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8891 		wm_put_swfwhw_semaphore(sc);
   8892 }
   8893 
   8894 /* SGMII related */
   8895 
   8896 /*
   8897  * wm_sgmii_uses_mdio
   8898  *
   8899  * Check whether the transaction is to the internal PHY or the external
   8900  * MDIO interface. Return true if it's MDIO.
   8901  */
   8902 static bool
   8903 wm_sgmii_uses_mdio(struct wm_softc *sc)
   8904 {
   8905 	uint32_t reg;
   8906 	bool ismdio = false;
   8907 
   8908 	switch (sc->sc_type) {
   8909 	case WM_T_82575:
   8910 	case WM_T_82576:
   8911 		reg = CSR_READ(sc, WMREG_MDIC);
   8912 		ismdio = ((reg & MDIC_DEST) != 0);
   8913 		break;
   8914 	case WM_T_82580:
   8915 	case WM_T_I350:
   8916 	case WM_T_I354:
   8917 	case WM_T_I210:
   8918 	case WM_T_I211:
   8919 		reg = CSR_READ(sc, WMREG_MDICNFG);
   8920 		ismdio = ((reg & MDICNFG_DEST) != 0);
   8921 		break;
   8922 	default:
   8923 		break;
   8924 	}
   8925 
   8926 	return ismdio;
   8927 }
   8928 
   8929 /*
   8930  * wm_sgmii_readreg:	[mii interface function]
   8931  *
    8932  *	Read a PHY register on the SGMII.
    8933  * This could be handled by the PHY layer if we didn't have to lock the
    8934  * resource ...
   8935  */
   8936 static int
   8937 wm_sgmii_readreg(device_t self, int phy, int reg)
   8938 {
   8939 	struct wm_softc *sc = device_private(self);
   8940 	uint32_t i2ccmd;
   8941 	int i, rv;
   8942 
   8943 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8944 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8945 		    __func__);
   8946 		return 0;
   8947 	}
   8948 
   8949 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8950 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8951 	    | I2CCMD_OPCODE_READ;
   8952 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8953 
   8954 	/* Poll the ready bit */
   8955 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8956 		delay(50);
   8957 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8958 		if (i2ccmd & I2CCMD_READY)
   8959 			break;
   8960 	}
   8961 	if ((i2ccmd & I2CCMD_READY) == 0)
   8962 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   8963 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   8964 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   8965 
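         	/* Swap the data bytes returned by the I2C interface */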
   8966 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   8967 
   8968 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   8969 	return rv;
   8970 }
   8971 
   8972 /*
   8973  * wm_sgmii_writereg:	[mii interface function]
   8974  *
   8975  *	Write a PHY register on the SGMII.
    8976  * resource ...
   8977  * ressource ...
   8978  */
   8979 static void
   8980 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   8981 {
   8982 	struct wm_softc *sc = device_private(self);
   8983 	uint32_t i2ccmd;
   8984 	int i;
   8985 	int val_swapped;
   8986 
   8987 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8988 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8989 		    __func__);
   8990 		return;
   8991 	}
   8992 	/* Swap the data bytes for the I2C interface */
   8993 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   8994 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8995 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8996 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   8997 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8998 
   8999 	/* Poll the ready bit */
   9000 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9001 		delay(50);
   9002 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9003 		if (i2ccmd & I2CCMD_READY)
   9004 			break;
   9005 	}
   9006 	if ((i2ccmd & I2CCMD_READY) == 0)
   9007 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9008 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9009 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9010 
    9011 	/* Release the same PHY semaphore we acquired above */
         	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9012 }
   9013 
   9014 /* TBI related */
   9015 
   9016 /*
   9017  * wm_tbi_mediainit:
   9018  *
   9019  *	Initialize media for use on 1000BASE-X devices.
   9020  */
   9021 static void
   9022 wm_tbi_mediainit(struct wm_softc *sc)
   9023 {
   9024 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9025 	const char *sep = "";
   9026 
   9027 	if (sc->sc_type < WM_T_82543)
   9028 		sc->sc_tipg = TIPG_WM_DFLT;
   9029 	else
   9030 		sc->sc_tipg = TIPG_LG_DFLT;
   9031 
   9032 	sc->sc_tbi_serdes_anegticks = 5;
   9033 
   9034 	/* Initialize our media structures */
   9035 	sc->sc_mii.mii_ifp = ifp;
   9036 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9037 
   9038 	if ((sc->sc_type >= WM_T_82575)
   9039 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9040 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9041 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9042 	else
   9043 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9044 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9045 
   9046 	/*
   9047 	 * SWD Pins:
   9048 	 *
   9049 	 *	0 = Link LED (output)
   9050 	 *	1 = Loss Of Signal (input)
   9051 	 */
   9052 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9053 
   9054 	/* XXX Perhaps this is only for TBI */
   9055 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9056 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9057 
   9058 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9059 		sc->sc_ctrl &= ~CTRL_LRST;
   9060 
   9061 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9062 
   9063 #define	ADD(ss, mm, dd)							\
   9064 do {									\
   9065 	aprint_normal("%s%s", sep, ss);					\
   9066 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   9067 	sep = ", ";							\
   9068 } while (/*CONSTCOND*/0)
   9069 
   9070 	aprint_normal_dev(sc->sc_dev, "");
   9071 
   9072 	/* Only 82545 is LX */
   9073 	if (sc->sc_type == WM_T_82545) {
   9074 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9075 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
   9076 	} else {
   9077 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9078 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   9079 	}
   9080 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   9081 	aprint_normal("\n");
   9082 
   9083 #undef ADD
   9084 
   9085 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9086 }
   9087 
   9088 /*
   9089  * wm_tbi_mediachange:	[ifmedia interface function]
   9090  *
   9091  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9092  */
   9093 static int
   9094 wm_tbi_mediachange(struct ifnet *ifp)
   9095 {
   9096 	struct wm_softc *sc = ifp->if_softc;
   9097 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9098 	uint32_t status;
   9099 	int i;
   9100 
   9101 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9102 		/* XXX need some work for >= 82571 and < 82575 */
   9103 		if (sc->sc_type < WM_T_82575)
   9104 			return 0;
   9105 	}
   9106 
   9107 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9108 	    || (sc->sc_type >= WM_T_82575))
   9109 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9110 
   9111 	sc->sc_ctrl &= ~CTRL_LRST;
   9112 	sc->sc_txcw = TXCW_ANE;
   9113 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9114 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9115 	else if (ife->ifm_media & IFM_FDX)
   9116 		sc->sc_txcw |= TXCW_FD;
   9117 	else
   9118 		sc->sc_txcw |= TXCW_HD;
   9119 
   9120 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9121 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9122 
   9123 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9124 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9125 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9126 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9127 	CSR_WRITE_FLUSH(sc);
   9128 	delay(1000);
   9129 
   9130 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9131 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9132 
   9133 	/*
    9134 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    9135 	 * if the optics detect a signal, and clear if they don't.
   9136 	 */
   9137 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9138 		/* Have signal; wait for the link to come up. */
   9139 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9140 			delay(10000);
   9141 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9142 				break;
   9143 		}
   9144 
   9145 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9146 			    device_xname(sc->sc_dev),i));
   9147 
   9148 		status = CSR_READ(sc, WMREG_STATUS);
   9149 		DPRINTF(WM_DEBUG_LINK,
   9150 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9151 			device_xname(sc->sc_dev),status, STATUS_LU));
   9152 		if (status & STATUS_LU) {
   9153 			/* Link is up. */
   9154 			DPRINTF(WM_DEBUG_LINK,
   9155 			    ("%s: LINK: set media -> link up %s\n",
   9156 			    device_xname(sc->sc_dev),
   9157 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9158 
   9159 			/*
    9160 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    9161 			 * automatically, so re-read sc->sc_ctrl from the register.
   9162 			 */
   9163 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9164 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9165 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9166 			if (status & STATUS_FD)
   9167 				sc->sc_tctl |=
   9168 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9169 			else
   9170 				sc->sc_tctl |=
   9171 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9172 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9173 				sc->sc_fcrtl |= FCRTL_XONE;
   9174 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9175 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9176 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9177 				      sc->sc_fcrtl);
   9178 			sc->sc_tbi_linkup = 1;
   9179 		} else {
   9180 			if (i == WM_LINKUP_TIMEOUT)
   9181 				wm_check_for_link(sc);
   9182 			/* Link is down. */
   9183 			DPRINTF(WM_DEBUG_LINK,
   9184 			    ("%s: LINK: set media -> link down\n",
   9185 			    device_xname(sc->sc_dev)));
   9186 			sc->sc_tbi_linkup = 0;
   9187 		}
   9188 	} else {
   9189 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9190 		    device_xname(sc->sc_dev)));
   9191 		sc->sc_tbi_linkup = 0;
   9192 	}
   9193 
   9194 	wm_tbi_serdes_set_linkled(sc);
   9195 
   9196 	return 0;
   9197 }
   9198 
   9199 /*
   9200  * wm_tbi_mediastatus:	[ifmedia interface function]
   9201  *
   9202  *	Get the current interface media status on a 1000BASE-X device.
   9203  */
   9204 static void
   9205 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9206 {
   9207 	struct wm_softc *sc = ifp->if_softc;
   9208 	uint32_t ctrl, status;
   9209 
   9210 	ifmr->ifm_status = IFM_AVALID;
   9211 	ifmr->ifm_active = IFM_ETHER;
   9212 
   9213 	status = CSR_READ(sc, WMREG_STATUS);
   9214 	if ((status & STATUS_LU) == 0) {
   9215 		ifmr->ifm_active |= IFM_NONE;
   9216 		return;
   9217 	}
   9218 
   9219 	ifmr->ifm_status |= IFM_ACTIVE;
   9220 	/* Only 82545 is LX */
   9221 	if (sc->sc_type == WM_T_82545)
   9222 		ifmr->ifm_active |= IFM_1000_LX;
   9223 	else
   9224 		ifmr->ifm_active |= IFM_1000_SX;
   9225 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9226 		ifmr->ifm_active |= IFM_FDX;
   9227 	else
   9228 		ifmr->ifm_active |= IFM_HDX;
   9229 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9230 	if (ctrl & CTRL_RFCE)
   9231 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9232 	if (ctrl & CTRL_TFCE)
   9233 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9234 }
   9235 
   9236 /* XXX TBI only */
   9237 static int
   9238 wm_check_for_link(struct wm_softc *sc)
   9239 {
   9240 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9241 	uint32_t rxcw;
   9242 	uint32_t ctrl;
   9243 	uint32_t status;
   9244 	uint32_t sig;
   9245 
   9246 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9247 		/* XXX need some work for >= 82571 */
   9248 		if (sc->sc_type >= WM_T_82571) {
   9249 			sc->sc_tbi_linkup = 1;
   9250 			return 0;
   9251 		}
   9252 	}
   9253 
   9254 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9255 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9256 	status = CSR_READ(sc, WMREG_STATUS);
   9257 
   9258 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9259 
   9260 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9261 		device_xname(sc->sc_dev), __func__,
   9262 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9263 		((status & STATUS_LU) != 0),
   9264 		((rxcw & RXCW_C) != 0)
   9265 		    ));
   9266 
   9267 	/*
   9268 	 * SWDPIN   LU RXCW
   9269 	 *      0    0    0
   9270 	 *      0    0    1	(should not happen)
   9271 	 *      0    1    0	(should not happen)
   9272 	 *      0    1    1	(should not happen)
   9273 	 *      1    0    0	Disable autonego and force linkup
   9274 	 *      1    0    1	got /C/ but not linkup yet
   9275 	 *      1    1    0	(linkup)
   9276 	 *      1    1    1	If IFM_AUTO, back to autonego
   9277 	 *
   9278 	 */
   9279 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9280 	    && ((status & STATUS_LU) == 0)
   9281 	    && ((rxcw & RXCW_C) == 0)) {
   9282 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9283 			__func__));
   9284 		sc->sc_tbi_linkup = 0;
   9285 		/* Disable auto-negotiation in the TXCW register */
   9286 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9287 
   9288 		/*
   9289 		 * Force link-up and also force full-duplex.
   9290 		 *
    9291 		 * NOTE: The hardware updated TFCE and RFCE in CTRL
    9292 		 * automatically, so update sc->sc_ctrl from the register value.
   9293 		 */
   9294 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9295 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9296 	} else if (((status & STATUS_LU) != 0)
   9297 	    && ((rxcw & RXCW_C) != 0)
   9298 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9299 		sc->sc_tbi_linkup = 1;
   9300 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9301 			__func__));
   9302 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9303 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9304 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9305 	    && ((rxcw & RXCW_C) != 0)) {
   9306 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9307 	} else {
   9308 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9309 			status));
   9310 	}
   9311 
   9312 	return 0;
   9313 }
   9314 
   9315 /*
   9316  * wm_tbi_tick:
   9317  *
   9318  *	Check the link on TBI devices.
   9319  *	This function acts as mii_tick().
   9320  */
   9321 static void
   9322 wm_tbi_tick(struct wm_softc *sc)
   9323 {
   9324 	struct mii_data *mii = &sc->sc_mii;
   9325 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9326 	uint32_t status;
   9327 
   9328 	KASSERT(WM_CORE_LOCKED(sc));
   9329 
   9330 	status = CSR_READ(sc, WMREG_STATUS);
   9331 
   9332 	/* XXX is this needed? */
   9333 	(void)CSR_READ(sc, WMREG_RXCW);
   9334 	(void)CSR_READ(sc, WMREG_CTRL);
   9335 
   9336 	/* set link status */
   9337 	if ((status & STATUS_LU) == 0) {
   9338 		DPRINTF(WM_DEBUG_LINK,
   9339 		    ("%s: LINK: checklink -> down\n",
   9340 			device_xname(sc->sc_dev)));
   9341 		sc->sc_tbi_linkup = 0;
   9342 	} else if (sc->sc_tbi_linkup == 0) {
   9343 		DPRINTF(WM_DEBUG_LINK,
   9344 		    ("%s: LINK: checklink -> up %s\n",
   9345 			device_xname(sc->sc_dev),
   9346 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9347 		sc->sc_tbi_linkup = 1;
   9348 		sc->sc_tbi_serdes_ticks = 0;
   9349 	}
   9350 
   9351 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9352 		goto setled;
   9353 
   9354 	if ((status & STATUS_LU) == 0) {
   9355 		sc->sc_tbi_linkup = 0;
   9356 		/* If the timer expired, retry autonegotiation */
   9357 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9358 		    && (++sc->sc_tbi_serdes_ticks
   9359 			>= sc->sc_tbi_serdes_anegticks)) {
   9360 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9361 			sc->sc_tbi_serdes_ticks = 0;
   9362 			/*
   9363 			 * Reset the link, and let autonegotiation do
   9364 			 * its thing
   9365 			 */
   9366 			sc->sc_ctrl |= CTRL_LRST;
   9367 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9368 			CSR_WRITE_FLUSH(sc);
   9369 			delay(1000);
   9370 			sc->sc_ctrl &= ~CTRL_LRST;
   9371 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9372 			CSR_WRITE_FLUSH(sc);
   9373 			delay(1000);
   9374 			CSR_WRITE(sc, WMREG_TXCW,
   9375 			    sc->sc_txcw & ~TXCW_ANE);
   9376 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9377 		}
   9378 	}
   9379 
   9380 setled:
   9381 	wm_tbi_serdes_set_linkled(sc);
   9382 }
   9383 
   9384 /* SERDES related */
   9385 static void
   9386 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9387 {
   9388 	uint32_t reg;
   9389 
   9390 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9391 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9392 		return;
   9393 
   9394 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9395 	reg |= PCS_CFG_PCS_EN;
   9396 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9397 
   9398 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9399 	reg &= ~CTRL_EXT_SWDPIN(3);
   9400 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9401 	CSR_WRITE_FLUSH(sc);
   9402 }
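
         /*
          * Clearing CTRL_EXT_SWDPIN(3) above is the same "power on the SFP
          * cage/SGMII PHY" operation used at attach time in
          * wm_gmii_mediainit().
          */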
   9403 
   9404 static int
   9405 wm_serdes_mediachange(struct ifnet *ifp)
   9406 {
   9407 	struct wm_softc *sc = ifp->if_softc;
   9408 	bool pcs_autoneg = true; /* XXX */
   9409 	uint32_t ctrl_ext, pcs_lctl, reg;
   9410 
   9411 	/* XXX Currently, this function is not called on 8257[12] */
   9412 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9413 	    || (sc->sc_type >= WM_T_82575))
   9414 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9415 
   9416 	wm_serdes_power_up_link_82575(sc);
   9417 
   9418 	sc->sc_ctrl |= CTRL_SLU;
   9419 
   9420 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9421 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9422 
   9423 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9424 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9425 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9426 	case CTRL_EXT_LINK_MODE_SGMII:
   9427 		pcs_autoneg = true;
   9428 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9429 		break;
   9430 	case CTRL_EXT_LINK_MODE_1000KX:
   9431 		pcs_autoneg = false;
   9432 		/* FALLTHROUGH */
   9433 	default:
   9434 		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
   9435 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9436 				pcs_autoneg = false;
   9437 		}
   9438 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9439 		    | CTRL_FRCFDX;
   9440 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9441 	}
   9442 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9443 
   9444 	if (pcs_autoneg) {
   9445 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9446 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9447 
   9448 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9449 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9450 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9451 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9452 	} else
   9453 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9454 
   9455 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    9456 
   9458 	return 0;
   9459 }
   9460 
   9461 static void
   9462 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9463 {
   9464 	struct wm_softc *sc = ifp->if_softc;
   9465 	struct mii_data *mii = &sc->sc_mii;
   9466 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9467 	uint32_t pcs_adv, pcs_lpab, reg;
   9468 
   9469 	ifmr->ifm_status = IFM_AVALID;
   9470 	ifmr->ifm_active = IFM_ETHER;
   9471 
   9472 	/* Check PCS */
   9473 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9474 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9475 		ifmr->ifm_active |= IFM_NONE;
   9476 		sc->sc_tbi_linkup = 0;
   9477 		goto setled;
   9478 	}
   9479 
   9480 	sc->sc_tbi_linkup = 1;
   9481 	ifmr->ifm_status |= IFM_ACTIVE;
   9482 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9483 	if ((reg & PCS_LSTS_FDX) != 0)
   9484 		ifmr->ifm_active |= IFM_FDX;
   9485 	else
   9486 		ifmr->ifm_active |= IFM_HDX;
   9487 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9488 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9489 		/* Check flow */
   9490 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9491 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9492 			printf("XXX LINKOK but not ACOMP\n");
   9493 			goto setled;
   9494 		}
   9495 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9496 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
    9497 		printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
   9498 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9499 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9500 			mii->mii_media_active |= IFM_FLOW
   9501 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9502 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9503 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9504 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9505 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9506 			mii->mii_media_active |= IFM_FLOW
   9507 			    | IFM_ETH_TXPAUSE;
   9508 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9509 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9510 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9511 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9512 			mii->mii_media_active |= IFM_FLOW
   9513 			    | IFM_ETH_RXPAUSE;
   9514 		} else {
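         			/*
         			 * Neither side advertised a usable pause
         			 * combination: no flow control.
         			 */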
   9515 		}
   9516 	}
   9517 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9518 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9519 setled:
   9520 	wm_tbi_serdes_set_linkled(sc);
   9521 }
   9522 
   9523 /*
   9524  * wm_serdes_tick:
   9525  *
   9526  *	Check the link on serdes devices.
   9527  */
   9528 static void
   9529 wm_serdes_tick(struct wm_softc *sc)
   9530 {
   9531 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9532 	struct mii_data *mii = &sc->sc_mii;
   9533 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9534 	uint32_t reg;
   9535 
   9536 	KASSERT(WM_CORE_LOCKED(sc));
   9537 
   9538 	mii->mii_media_status = IFM_AVALID;
   9539 	mii->mii_media_active = IFM_ETHER;
   9540 
   9541 	/* Check PCS */
   9542 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9543 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9544 		mii->mii_media_status |= IFM_ACTIVE;
   9545 		sc->sc_tbi_linkup = 1;
   9546 		sc->sc_tbi_serdes_ticks = 0;
   9547 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9548 		if ((reg & PCS_LSTS_FDX) != 0)
   9549 			mii->mii_media_active |= IFM_FDX;
   9550 		else
   9551 			mii->mii_media_active |= IFM_HDX;
   9552 	} else {
   9553 		mii->mii_media_status |= IFM_NONE;
   9554 		sc->sc_tbi_linkup = 0;
    9555 		/* If the timer expired, retry autonegotiation */
   9556 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9557 		    && (++sc->sc_tbi_serdes_ticks
   9558 			>= sc->sc_tbi_serdes_anegticks)) {
   9559 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9560 			sc->sc_tbi_serdes_ticks = 0;
   9561 			/* XXX */
   9562 			wm_serdes_mediachange(ifp);
   9563 		}
   9564 	}
   9565 
   9566 	wm_tbi_serdes_set_linkled(sc);
   9567 }
   9568 
   9569 /* SFP related */
   9570 
   9571 static int
   9572 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9573 {
   9574 	uint32_t i2ccmd;
   9575 	int i;
   9576 
   9577 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9578 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9579 
   9580 	/* Poll the ready bit */
   9581 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9582 		delay(50);
   9583 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9584 		if (i2ccmd & I2CCMD_READY)
   9585 			break;
   9586 	}
   9587 	if ((i2ccmd & I2CCMD_READY) == 0)
   9588 		return -1;
   9589 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9590 		return -1;
   9591 
   9592 	*data = i2ccmd & 0x00ff;
   9593 
   9594 	return 0;
   9595 }
   9596 
   9597 static uint32_t
   9598 wm_sfp_get_media_type(struct wm_softc *sc)
   9599 {
   9600 	uint32_t ctrl_ext;
   9601 	uint8_t val = 0;
   9602 	int timeout = 3;
   9603 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9604 	int rv = -1;
   9605 
   9606 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9607 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9608 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9609 	CSR_WRITE_FLUSH(sc);
   9610 
   9611 	/* Read SFP module data */
   9612 	while (timeout) {
   9613 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9614 		if (rv == 0)
   9615 			break;
   9616 		delay(100*1000); /* XXX too big */
   9617 		timeout--;
   9618 	}
   9619 	if (rv != 0)
   9620 		goto out;
   9621 	switch (val) {
   9622 	case SFF_SFP_ID_SFF:
   9623 		aprint_normal_dev(sc->sc_dev,
   9624 		    "Module/Connector soldered to board\n");
   9625 		break;
   9626 	case SFF_SFP_ID_SFP:
   9627 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9628 		break;
   9629 	case SFF_SFP_ID_UNKNOWN:
   9630 		goto out;
   9631 	default:
   9632 		break;
   9633 	}
   9634 
   9635 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9636 	if (rv != 0) {
   9637 		goto out;
   9638 	}
   9639 
   9640 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9641 		mediatype = WM_MEDIATYPE_SERDES;
   9642 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   9643 		sc->sc_flags |= WM_F_SGMII;
   9644 		mediatype = WM_MEDIATYPE_COPPER;
   9645 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   9646 		sc->sc_flags |= WM_F_SGMII;
   9647 		mediatype = WM_MEDIATYPE_SERDES;
   9648 	}
   9649 
   9650 out:
   9651 	/* Restore I2C interface setting */
   9652 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9653 
   9654 	return mediatype;
   9655 }
   9656 /*
   9657  * NVM related.
    9658  * Microwire, SPI (with or without EERD) and Flash.
   9659  */
   9660 
    9661 /* Helpers shared by both SPI and Microwire */
   9662 
   9663 /*
   9664  * wm_eeprom_sendbits:
   9665  *
   9666  *	Send a series of bits to the EEPROM.
   9667  */
   9668 static void
   9669 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9670 {
   9671 	uint32_t reg;
   9672 	int x;
   9673 
   9674 	reg = CSR_READ(sc, WMREG_EECD);
   9675 
   9676 	for (x = nbits; x > 0; x--) {
   9677 		if (bits & (1U << (x - 1)))
   9678 			reg |= EECD_DI;
   9679 		else
   9680 			reg &= ~EECD_DI;
   9681 		CSR_WRITE(sc, WMREG_EECD, reg);
   9682 		CSR_WRITE_FLUSH(sc);
   9683 		delay(2);
   9684 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9685 		CSR_WRITE_FLUSH(sc);
   9686 		delay(2);
   9687 		CSR_WRITE(sc, WMREG_EECD, reg);
   9688 		CSR_WRITE_FLUSH(sc);
   9689 		delay(2);
   9690 	}
   9691 }
   9692 
   9693 /*
   9694  * wm_eeprom_recvbits:
   9695  *
   9696  *	Receive a series of bits from the EEPROM.
   9697  */
   9698 static void
   9699 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9700 {
   9701 	uint32_t reg, val;
   9702 	int x;
   9703 
   9704 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9705 
   9706 	val = 0;
   9707 	for (x = nbits; x > 0; x--) {
   9708 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9709 		CSR_WRITE_FLUSH(sc);
   9710 		delay(2);
   9711 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9712 			val |= (1U << (x - 1));
   9713 		CSR_WRITE(sc, WMREG_EECD, reg);
   9714 		CSR_WRITE_FLUSH(sc);
   9715 		delay(2);
   9716 	}
   9717 	*valp = val;
   9718 }
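
         /*
          * Both helpers above clock one bit per EECD_SK pulse: the bit is
          * driven on EECD_DI (or sampled from EECD_DO) while SK is raised
          * and lowered, with a 2us settle time on each edge.  The Microwire
          * and SPI read paths below are built from these two primitives.
          */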
   9719 
   9720 /* Microwire */
   9721 
   9722 /*
   9723  * wm_nvm_read_uwire:
   9724  *
   9725  *	Read a word from the EEPROM using the MicroWire protocol.
   9726  */
   9727 static int
   9728 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9729 {
   9730 	uint32_t reg, val;
   9731 	int i;
   9732 
   9733 	for (i = 0; i < wordcnt; i++) {
   9734 		/* Clear SK and DI. */
   9735 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9736 		CSR_WRITE(sc, WMREG_EECD, reg);
   9737 
   9738 		/*
   9739 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9740 		 * and Xen.
   9741 		 *
    9742 		 * We use this workaround only for the 82540 because qemu's
    9743 		 * e1000 acts as an 82540.
   9744 		 */
   9745 		if (sc->sc_type == WM_T_82540) {
   9746 			reg |= EECD_SK;
   9747 			CSR_WRITE(sc, WMREG_EECD, reg);
   9748 			reg &= ~EECD_SK;
   9749 			CSR_WRITE(sc, WMREG_EECD, reg);
   9750 			CSR_WRITE_FLUSH(sc);
   9751 			delay(2);
   9752 		}
   9753 		/* XXX: end of workaround */
   9754 
   9755 		/* Set CHIP SELECT. */
   9756 		reg |= EECD_CS;
   9757 		CSR_WRITE(sc, WMREG_EECD, reg);
   9758 		CSR_WRITE_FLUSH(sc);
   9759 		delay(2);
   9760 
   9761 		/* Shift in the READ command. */
   9762 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9763 
   9764 		/* Shift in address. */
   9765 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9766 
   9767 		/* Shift out the data. */
   9768 		wm_eeprom_recvbits(sc, &val, 16);
   9769 		data[i] = val & 0xffff;
   9770 
   9771 		/* Clear CHIP SELECT. */
   9772 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9773 		CSR_WRITE(sc, WMREG_EECD, reg);
   9774 		CSR_WRITE_FLUSH(sc);
   9775 		delay(2);
   9776 	}
   9777 
   9778 	return 0;
   9779 }
   9780 
   9781 /* SPI */
   9782 
   9783 /*
   9784  * Set SPI and FLASH related information from the EECD register.
   9785  * For 82541 and 82547, the word size is taken from EEPROM.
   9786  */
   9787 static int
   9788 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9789 {
   9790 	int size;
   9791 	uint32_t reg;
   9792 	uint16_t data;
   9793 
   9794 	reg = CSR_READ(sc, WMREG_EECD);
   9795 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9796 
   9797 	/* Read the size of NVM from EECD by default */
   9798 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9799 	switch (sc->sc_type) {
   9800 	case WM_T_82541:
   9801 	case WM_T_82541_2:
   9802 	case WM_T_82547:
   9803 	case WM_T_82547_2:
    9804 		/* Set a dummy word size so we can read the real size word */
   9805 		sc->sc_nvm_wordsize = 64;
   9806 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9807 		reg = data;
   9808 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9809 		if (size == 0)
    9810 			size = 6; /* 64 words */
   9811 		else
   9812 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9813 		break;
   9814 	case WM_T_80003:
   9815 	case WM_T_82571:
   9816 	case WM_T_82572:
   9817 	case WM_T_82573: /* SPI case */
   9818 	case WM_T_82574: /* SPI case */
   9819 	case WM_T_82583: /* SPI case */
   9820 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9821 		if (size > 14)
   9822 			size = 14;
   9823 		break;
   9824 	case WM_T_82575:
   9825 	case WM_T_82576:
   9826 	case WM_T_82580:
   9827 	case WM_T_I350:
   9828 	case WM_T_I354:
   9829 	case WM_T_I210:
   9830 	case WM_T_I211:
   9831 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9832 		if (size > 15)
   9833 			size = 15;
   9834 		break;
   9835 	default:
   9836 		aprint_error_dev(sc->sc_dev,
   9837 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   9838 		return -1;
   9840 	}
   9841 
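	/* At this point "size" is log2 of the NVM word count */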
   9842 	sc->sc_nvm_wordsize = 1 << size;
   9843 
   9844 	return 0;
   9845 }
   9846 
   9847 /*
   9848  * wm_nvm_ready_spi:
   9849  *
   9850  *	Wait for a SPI EEPROM to be ready for commands.
   9851  */
   9852 static int
   9853 wm_nvm_ready_spi(struct wm_softc *sc)
   9854 {
   9855 	uint32_t val;
   9856 	int usec;
   9857 
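	/*
	 * Poll the status register (RDSR opcode); the busy bit clears
	 * when the part is ready to accept a new command.
	 */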
   9858 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   9859 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   9860 		wm_eeprom_recvbits(sc, &val, 8);
   9861 		if ((val & SPI_SR_RDY) == 0)
   9862 			break;
   9863 	}
   9864 	if (usec >= SPI_MAX_RETRIES) {
   9865 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   9866 		return 1;
   9867 	}
   9868 	return 0;
   9869 }
   9870 
   9871 /*
   9872  * wm_nvm_read_spi:
   9873  *
    9874 	 *	Read a word from the EEPROM using the SPI protocol.
   9875  */
   9876 static int
   9877 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9878 {
   9879 	uint32_t reg, val;
   9880 	int i;
   9881 	uint8_t opc;
   9882 
   9883 	/* Clear SK and CS. */
   9884 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   9885 	CSR_WRITE(sc, WMREG_EECD, reg);
   9886 	CSR_WRITE_FLUSH(sc);
   9887 	delay(2);
   9888 
   9889 	if (wm_nvm_ready_spi(sc))
   9890 		return 1;
   9891 
   9892 	/* Toggle CS to flush commands. */
   9893 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   9894 	CSR_WRITE_FLUSH(sc);
   9895 	delay(2);
   9896 	CSR_WRITE(sc, WMREG_EECD, reg);
   9897 	CSR_WRITE_FLUSH(sc);
   9898 	delay(2);
   9899 
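	/*
	 * Parts with 8 address bits use the A8 bit of the opcode as a
	 * ninth address bit: words 128 and up live at byte addresses
	 * 256 and up.  The device is byte-addressed, which is why the
	 * word address is shifted left by one below.
	 */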
   9900 	opc = SPI_OPC_READ;
   9901 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   9902 		opc |= SPI_OPC_A8;
   9903 
   9904 	wm_eeprom_sendbits(sc, opc, 8);
   9905 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   9906 
   9907 	for (i = 0; i < wordcnt; i++) {
   9908 		wm_eeprom_recvbits(sc, &val, 16);
   9909 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   9910 	}
   9911 
   9912 	/* Raise CS and clear SK. */
   9913 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   9914 	CSR_WRITE(sc, WMREG_EECD, reg);
   9915 	CSR_WRITE_FLUSH(sc);
   9916 	delay(2);
   9917 
   9918 	return 0;
   9919 }
   9920 
    9921 /* Reading via the EERD register */
   9922 
   9923 static int
   9924 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   9925 {
   9926 	uint32_t attempts = 100000;
   9927 	uint32_t i, reg = 0;
   9928 	int32_t done = -1;
   9929 
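	/* Up to 100000 * 5us = 500ms for the DONE bit to appear */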
   9930 	for (i = 0; i < attempts; i++) {
   9931 		reg = CSR_READ(sc, rw);
   9932 
   9933 		if (reg & EERD_DONE) {
   9934 			done = 0;
   9935 			break;
   9936 		}
   9937 		delay(5);
   9938 	}
   9939 
   9940 	return done;
   9941 }
   9942 
   9943 static int
   9944 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   9945     uint16_t *data)
   9946 {
   9947 	int i, eerd = 0;
   9948 	int error = 0;
   9949 
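	/*
	 * For each word: write the word address and the START bit to
	 * EERD, poll for DONE, then pull the word out of the data field.
	 */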
   9950 	for (i = 0; i < wordcnt; i++) {
   9951 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   9952 
   9953 		CSR_WRITE(sc, WMREG_EERD, eerd);
   9954 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   9955 		if (error != 0)
   9956 			break;
   9957 
   9958 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   9959 	}
   9960 
   9961 	return error;
   9962 }
   9963 
   9964 /* Flash */
   9965 
   9966 static int
   9967 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   9968 {
   9969 	uint32_t eecd;
   9970 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   9971 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   9972 	uint8_t sig_byte = 0;
   9973 
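	/*
	 * The flash holds two copies of the NVM image (bank 0 and
	 * bank 1).  ICH8/ICH9 may report the valid bank directly via
	 * EECD; otherwise probe each bank's signature byte for
	 * ICH_NVM_SIG_VALUE.
	 */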
   9974 	switch (sc->sc_type) {
   9975 	case WM_T_ICH8:
   9976 	case WM_T_ICH9:
   9977 		eecd = CSR_READ(sc, WMREG_EECD);
   9978 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   9979 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   9980 			return 0;
   9981 		}
   9982 		/* FALLTHROUGH */
   9983 	default:
   9984 		/* Default to 0 */
   9985 		*bank = 0;
   9986 
   9987 		/* Check bank 0 */
   9988 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   9989 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   9990 			*bank = 0;
   9991 			return 0;
   9992 		}
   9993 
   9994 		/* Check bank 1 */
   9995 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   9996 		    &sig_byte);
   9997 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   9998 			*bank = 1;
   9999 			return 0;
   10000 		}
   10001 	}
   10002 
   10003 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10004 		device_xname(sc->sc_dev)));
   10005 	return -1;
   10006 }
   10007 
   10008 /******************************************************************************
   10009  * This function does initial flash setup so that a new read/write/erase cycle
   10010  * can be started.
   10011  *
   10012  * sc - The pointer to the hw structure
   10013  ****************************************************************************/
   10014 static int32_t
   10015 wm_ich8_cycle_init(struct wm_softc *sc)
   10016 {
   10017 	uint16_t hsfsts;
   10018 	int32_t error = 1;
   10019 	int32_t i     = 0;
   10020 
   10021 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10022 
    10023 	/* Check the Flash Descriptor Valid bit in HW status */
   10024 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10025 		return error;
   10026 	}
   10027 
    10028 	/* Clear FCERR and DAEL in HW status by writing 1s */
   10030 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10031 
   10032 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10033 
    10034 	/*
    10035 	 * Either we should have a hardware SPI cycle-in-progress bit
    10036 	 * to check before starting a new cycle, or the FDONE bit should
    10037 	 * be changed in hardware so that it reads 1 after a hardware
    10038 	 * reset, which could then indicate whether a cycle is in
    10039 	 * progress or has completed.  We should also have some software
    10040 	 * semaphore mechanism guarding FDONE or the cycle-in-progress
    10041 	 * bit, so that accesses by two threads are serialized and two
    10042 	 * threads don't start a cycle at the same time.
    10043 	 */
   10044 
   10045 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10046 		/*
   10047 		 * There is no cycle running at present, so we can start a
   10048 		 * cycle
   10049 		 */
   10050 
   10051 		/* Begin by setting Flash Cycle Done. */
   10052 		hsfsts |= HSFSTS_DONE;
   10053 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10054 		error = 0;
   10055 	} else {
   10056 		/*
    10057 		 * Otherwise poll for a while so the current cycle has a
    10058 		 * chance to end before giving up.
   10059 		 */
   10060 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10061 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10062 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10063 				error = 0;
   10064 				break;
   10065 			}
   10066 			delay(1);
   10067 		}
   10068 		if (error == 0) {
   10069 			/*
    10070 			 * The previous cycle completed in time; now set
    10071 			 * the Flash Cycle Done bit.
   10072 			 */
   10073 			hsfsts |= HSFSTS_DONE;
   10074 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10075 		}
   10076 	}
   10077 	return error;
   10078 }
   10079 
   10080 /******************************************************************************
   10081  * This function starts a flash cycle and waits for its completion
   10082  *
   10083  * sc - The pointer to the hw structure
   10084  ****************************************************************************/
   10085 static int32_t
   10086 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10087 {
   10088 	uint16_t hsflctl;
   10089 	uint16_t hsfsts;
   10090 	int32_t error = 1;
   10091 	uint32_t i = 0;
   10092 
   10093 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10094 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10095 	hsflctl |= HSFCTL_GO;
   10096 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10097 
    10098 	/* Wait until the FDONE bit is set */
   10099 	do {
   10100 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10101 		if (hsfsts & HSFSTS_DONE)
   10102 			break;
   10103 		delay(1);
   10104 		i++;
   10105 	} while (i < timeout);
    10106 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10107 		error = 0;
   10108 
   10109 	return error;
   10110 }
   10111 
   10112 /******************************************************************************
   10113  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   10114  *
   10115  * sc - The pointer to the hw structure
   10116  * index - The index of the byte or word to read.
   10117  * size - Size of data to read, 1=byte 2=word
   10118  * data - Pointer to the word to store the value read.
   10119  *****************************************************************************/
   10120 static int32_t
   10121 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10122     uint32_t size, uint16_t *data)
   10123 {
   10124 	uint16_t hsfsts;
   10125 	uint16_t hsflctl;
   10126 	uint32_t flash_linear_address;
   10127 	uint32_t flash_data = 0;
   10128 	int32_t error = 1;
   10129 	int32_t count = 0;
   10130 
    10131 	if (size < 1 || size > 2 || data == NULL ||
   10132 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10133 		return error;
   10134 
   10135 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10136 	    sc->sc_ich8_flash_base;
   10137 
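	/*
	 * On each attempt below: initialize the cycle, program the byte
	 * count and the READ cycle type into HSFCTL, write the linear
	 * address into FADDR, start the cycle and, on success, pull the
	 * result out of FDATA0.
	 */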
   10138 	do {
   10139 		delay(1);
   10140 		/* Steps */
   10141 		error = wm_ich8_cycle_init(sc);
   10142 		if (error)
   10143 			break;
   10144 
   10145 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10146 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   10147 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10148 		    & HSFCTL_BCOUNT_MASK;
   10149 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10150 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10151 
   10152 		/*
    10153 		 * Write the last 24 bits of the index into the Flash
    10154 		 * Linear Address field of the Flash Address register.
    10155 		 */
    10156 		/* TODO: maybe check the index against the size of the flash */
   10157 
   10158 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10159 
   10160 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10161 
   10162 		/*
    10163 		 * If FCERR is set, clear it and retry the whole sequence
    10164 		 * a few more times; otherwise read the result from Flash
    10165 		 * Data0, least significant byte first.
   10167 		 */
   10168 		if (error == 0) {
   10169 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10170 			if (size == 1)
   10171 				*data = (uint8_t)(flash_data & 0x000000FF);
   10172 			else if (size == 2)
   10173 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10174 			break;
   10175 		} else {
   10176 			/*
   10177 			 * If we've gotten here, then things are probably
   10178 			 * completely hosed, but if the error condition is
   10179 			 * detected, it won't hurt to give it another try...
   10180 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10181 			 */
   10182 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10183 			if (hsfsts & HSFSTS_ERR) {
   10184 				/* Repeat for some time before giving up. */
   10185 				continue;
   10186 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10187 				break;
   10188 		}
   10189 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10190 
   10191 	return error;
   10192 }
   10193 
   10194 /******************************************************************************
   10195  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10196  *
   10197  * sc - pointer to wm_hw structure
   10198  * index - The index of the byte to read.
   10199  * data - Pointer to a byte to store the value read.
   10200  *****************************************************************************/
   10201 static int32_t
   10202 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10203 {
   10204 	int32_t status;
   10205 	uint16_t word = 0;
   10206 
   10207 	status = wm_read_ich8_data(sc, index, 1, &word);
   10208 	if (status == 0)
   10209 		*data = (uint8_t)word;
   10210 	else
   10211 		*data = 0;
   10212 
   10213 	return status;
   10214 }
   10215 
   10216 /******************************************************************************
   10217  * Reads a word from the NVM using the ICH8 flash access registers.
   10218  *
   10219  * sc - pointer to wm_hw structure
   10220  * index - The starting byte index of the word to read.
   10221  * data - Pointer to a word to store the value read.
   10222  *****************************************************************************/
   10223 static int32_t
   10224 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10225 {
   10226 	int32_t status;
   10227 
   10228 	status = wm_read_ich8_data(sc, index, 2, data);
   10229 	return status;
   10230 }
   10231 
   10232 /******************************************************************************
   10233  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10234  * register.
   10235  *
   10236  * sc - Struct containing variables accessed by shared code
   10237  * offset - offset of word in the EEPROM to read
   10238  * data - word read from the EEPROM
   10239  * words - number of words to read
   10240  *****************************************************************************/
   10241 static int
   10242 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10243 {
   10244 	int32_t  error = 0;
   10245 	uint32_t flash_bank = 0;
   10246 	uint32_t act_offset = 0;
   10247 	uint32_t bank_offset = 0;
   10248 	uint16_t word = 0;
   10249 	uint16_t i = 0;
   10250 
   10251 	/*
   10252 	 * We need to know which is the valid flash bank.  In the event
   10253 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10254 	 * managing flash_bank.  So it cannot be trusted and needs
   10255 	 * to be updated with each read.
   10256 	 */
   10257 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10258 	if (error) {
   10259 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10260 			device_xname(sc->sc_dev)));
   10261 		flash_bank = 0;
   10262 	}
   10263 
   10264 	/*
    10265 	 * Adjust the offset appropriately if we're on bank 1,
    10266 	 * accounting for the word size.
   10267 	 */
   10268 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10269 
   10270 	error = wm_get_swfwhw_semaphore(sc);
   10271 	if (error) {
   10272 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10273 		    __func__);
   10274 		return error;
   10275 	}
   10276 
   10277 	for (i = 0; i < words; i++) {
   10278 		/* The NVM part needs a byte offset, hence * 2 */
   10279 		act_offset = bank_offset + ((offset + i) * 2);
   10280 		error = wm_read_ich8_word(sc, act_offset, &word);
   10281 		if (error) {
   10282 			aprint_error_dev(sc->sc_dev,
   10283 			    "%s: failed to read NVM\n", __func__);
   10284 			break;
   10285 		}
   10286 		data[i] = word;
   10287 	}
   10288 
   10289 	wm_put_swfwhw_semaphore(sc);
   10290 	return error;
   10291 }
   10292 
   10293 /* iNVM */
   10294 
   10295 static int
   10296 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10297 {
    10298 	int32_t  rv = -1;	/* Default to "word not found" */
   10299 	uint32_t invm_dword;
   10300 	uint16_t i;
   10301 	uint8_t record_type, word_address;
   10302 
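	/*
	 * The iNVM is a sequence of 32-bit records.  Each record's type
	 * field says what it is; CSR- and RSA-autoload records are
	 * skipped over by their size, while word-autoload records carry
	 * a word address and 16 bits of data.
	 */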
   10303 	for (i = 0; i < INVM_SIZE; i++) {
   10304 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10305 		/* Get record type */
   10306 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10307 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10308 			break;
   10309 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10310 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10311 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10312 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10313 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10314 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10315 			if (word_address == address) {
   10316 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10317 				rv = 0;
   10318 				break;
   10319 			}
   10320 		}
   10321 	}
   10322 
   10323 	return rv;
   10324 }
   10325 
   10326 static int
   10327 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10328 {
   10329 	int rv = 0;
   10330 	int i;
   10331 
   10332 	for (i = 0; i < words; i++) {
   10333 		switch (offset + i) {
   10334 		case NVM_OFF_MACADDR:
   10335 		case NVM_OFF_MACADDR1:
   10336 		case NVM_OFF_MACADDR2:
   10337 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10338 			if (rv != 0) {
   10339 				data[i] = 0xffff;
   10340 				rv = -1;
   10341 			}
   10342 			break;
   10343 		case NVM_OFF_CFG2:
   10344 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10345 			if (rv != 0) {
   10346 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10347 				rv = 0;
   10348 			}
   10349 			break;
   10350 		case NVM_OFF_CFG4:
   10351 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10352 			if (rv != 0) {
   10353 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10354 				rv = 0;
   10355 			}
   10356 			break;
   10357 		case NVM_OFF_LED_1_CFG:
   10358 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10359 			if (rv != 0) {
   10360 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10361 				rv = 0;
   10362 			}
   10363 			break;
   10364 		case NVM_OFF_LED_0_2_CFG:
   10365 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10366 			if (rv != 0) {
   10367 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10368 				rv = 0;
   10369 			}
   10370 			break;
   10371 		case NVM_OFF_ID_LED_SETTINGS:
   10372 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10373 			if (rv != 0) {
   10374 				*data = ID_LED_RESERVED_FFFF;
   10375 				rv = 0;
   10376 			}
   10377 			break;
   10378 		default:
   10379 			DPRINTF(WM_DEBUG_NVM,
   10380 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10381 			*data = NVM_RESERVED_WORD;
   10382 			break;
   10383 		}
   10384 	}
   10385 
   10386 	return rv;
   10387 }
   10388 
    10389 /* Locking, NVM type detection, checksum validation, version check and read */
   10390 
   10391 /*
   10392  * wm_nvm_acquire:
   10393  *
   10394  *	Perform the EEPROM handshake required on some chips.
   10395  */
   10396 static int
   10397 wm_nvm_acquire(struct wm_softc *sc)
   10398 {
   10399 	uint32_t reg;
   10400 	int x;
   10401 	int ret = 0;
   10402 
    10403 	/* Flash needs no handshake; always succeed */
   10404 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10405 		return 0;
   10406 
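	/*
	 * The handshake has up to two layers: first the chip-specific
	 * software/firmware semaphore (EXTCNF, SWFW or SWSM), then, on
	 * chips with WM_F_LOCK_EECD, the EECD request/grant dance below.
	 */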
   10407 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10408 		ret = wm_get_swfwhw_semaphore(sc);
   10409 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10410 		/* This will also do wm_get_swsm_semaphore() if needed */
   10411 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10412 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10413 		ret = wm_get_swsm_semaphore(sc);
   10414 	}
   10415 
   10416 	if (ret) {
   10417 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10418 			__func__);
   10419 		return 1;
   10420 	}
   10421 
   10422 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10423 		reg = CSR_READ(sc, WMREG_EECD);
   10424 
   10425 		/* Request EEPROM access. */
   10426 		reg |= EECD_EE_REQ;
   10427 		CSR_WRITE(sc, WMREG_EECD, reg);
   10428 
   10429 		/* ..and wait for it to be granted. */
   10430 		for (x = 0; x < 1000; x++) {
   10431 			reg = CSR_READ(sc, WMREG_EECD);
   10432 			if (reg & EECD_EE_GNT)
   10433 				break;
   10434 			delay(5);
   10435 		}
   10436 		if ((reg & EECD_EE_GNT) == 0) {
   10437 			aprint_error_dev(sc->sc_dev,
   10438 			    "could not acquire EEPROM GNT\n");
   10439 			reg &= ~EECD_EE_REQ;
   10440 			CSR_WRITE(sc, WMREG_EECD, reg);
   10441 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10442 				wm_put_swfwhw_semaphore(sc);
   10443 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10444 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10445 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10446 				wm_put_swsm_semaphore(sc);
   10447 			return 1;
   10448 		}
   10449 	}
   10450 
   10451 	return 0;
   10452 }
   10453 
   10454 /*
   10455  * wm_nvm_release:
   10456  *
   10457  *	Release the EEPROM mutex.
   10458  */
   10459 static void
   10460 wm_nvm_release(struct wm_softc *sc)
   10461 {
   10462 	uint32_t reg;
   10463 
    10464 	/* Flash needed no handshake; nothing to release */
   10465 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10466 		return;
   10467 
   10468 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10469 		reg = CSR_READ(sc, WMREG_EECD);
   10470 		reg &= ~EECD_EE_REQ;
   10471 		CSR_WRITE(sc, WMREG_EECD, reg);
   10472 	}
   10473 
   10474 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10475 		wm_put_swfwhw_semaphore(sc);
   10476 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10477 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10478 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10479 		wm_put_swsm_semaphore(sc);
   10480 }
   10481 
   10482 static int
   10483 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10484 {
   10485 	uint32_t eecd = 0;
   10486 
   10487 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10488 	    || sc->sc_type == WM_T_82583) {
   10489 		eecd = CSR_READ(sc, WMREG_EECD);
   10490 
   10491 		/* Isolate bits 15 & 16 */
   10492 		eecd = ((eecd >> 15) & 0x03);
   10493 
   10494 		/* If both bits are set, device is Flash type */
   10495 		if (eecd == 0x03)
   10496 			return 0;
   10497 	}
   10498 	return 1;
   10499 }
   10500 
   10501 static int
   10502 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10503 {
   10504 	uint32_t eec;
   10505 
   10506 	eec = CSR_READ(sc, WMREG_EEC);
   10507 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10508 		return 1;
   10509 
   10510 	return 0;
   10511 }
   10512 
   10513 /*
   10514  * wm_nvm_validate_checksum
   10515  *
    10516  * The checksum is defined as the sum of the first 64 16-bit words.
   10517  */
   10518 static int
   10519 wm_nvm_validate_checksum(struct wm_softc *sc)
   10520 {
   10521 	uint16_t checksum;
   10522 	uint16_t eeprom_data;
   10523 #ifdef WM_DEBUG
   10524 	uint16_t csum_wordaddr, valid_checksum;
   10525 #endif
   10526 	int i;
   10527 
   10528 	checksum = 0;
   10529 
   10530 	/* Don't check for I211 */
   10531 	if (sc->sc_type == WM_T_I211)
   10532 		return 0;
   10533 
   10534 #ifdef WM_DEBUG
   10535 	if (sc->sc_type == WM_T_PCH_LPT) {
   10536 		csum_wordaddr = NVM_OFF_COMPAT;
   10537 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10538 	} else {
   10539 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10540 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10541 	}
   10542 
   10543 	/* Dump EEPROM image for debug */
   10544 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10545 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10546 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10547 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10548 		if ((eeprom_data & valid_checksum) == 0) {
   10549 			DPRINTF(WM_DEBUG_NVM,
   10550 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10551 				device_xname(sc->sc_dev), eeprom_data,
   10552 				    valid_checksum));
   10553 		}
   10554 	}
   10555 
   10556 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10557 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10558 		for (i = 0; i < NVM_SIZE; i++) {
   10559 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10560 				printf("XXXX ");
   10561 			else
   10562 				printf("%04hx ", eeprom_data);
   10563 			if (i % 8 == 7)
   10564 				printf("\n");
   10565 		}
   10566 	}
   10567 
   10568 #endif /* WM_DEBUG */
   10569 
   10570 	for (i = 0; i < NVM_SIZE; i++) {
   10571 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10572 			return 1;
   10573 		checksum += eeprom_data;
   10574 	}
   10575 
   10576 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10577 #ifdef WM_DEBUG
   10578 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10579 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10580 #endif
   10581 	}
   10582 
   10583 	return 0;
   10584 }
   10585 
   10586 static void
   10587 wm_nvm_version_invm(struct wm_softc *sc)
   10588 {
   10589 	uint32_t dword;
   10590 
   10591 	/*
    10592 	 * Linux's code for decoding the version is very strange, so we
    10593 	 * don't follow that algorithm; we just use word 61 as the
    10594 	 * document describes.  Perhaps it's not perfect, though...
   10595 	 *
   10596 	 * Example:
   10597 	 *
   10598 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10599 	 */
   10600 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10601 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10602 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10603 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10604 }
   10605 
   10606 static void
   10607 wm_nvm_version(struct wm_softc *sc)
   10608 {
   10609 	uint16_t major, minor, build, patch;
   10610 	uint16_t uid0, uid1;
   10611 	uint16_t nvm_data;
   10612 	uint16_t off;
   10613 	bool check_version = false;
   10614 	bool check_optionrom = false;
   10615 	bool have_build = false;
   10616 
   10617 	/*
   10618 	 * Version format:
   10619 	 *
   10620 	 * XYYZ
   10621 	 * X0YZ
   10622 	 * X0YY
   10623 	 *
   10624 	 * Example:
   10625 	 *
   10626 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10627 	 *	82571	0x50a6	5.10.6?
   10628 	 *	82572	0x506a	5.6.10?
   10629 	 *	82572EI	0x5069	5.6.9?
   10630 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10631 	 *		0x2013	2.1.3?
    10632 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10633 	 */
   10634 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10635 	switch (sc->sc_type) {
   10636 	case WM_T_82571:
   10637 	case WM_T_82572:
   10638 	case WM_T_82574:
   10639 	case WM_T_82583:
   10640 		check_version = true;
   10641 		check_optionrom = true;
   10642 		have_build = true;
   10643 		break;
   10644 	case WM_T_82575:
   10645 	case WM_T_82576:
   10646 	case WM_T_82580:
   10647 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10648 			check_version = true;
   10649 		break;
   10650 	case WM_T_I211:
   10651 		wm_nvm_version_invm(sc);
   10652 		goto printver;
   10653 	case WM_T_I210:
   10654 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10655 			wm_nvm_version_invm(sc);
   10656 			goto printver;
   10657 		}
   10658 		/* FALLTHROUGH */
   10659 	case WM_T_I350:
   10660 	case WM_T_I354:
   10661 		check_version = true;
   10662 		check_optionrom = true;
   10663 		break;
   10664 	default:
   10665 		return;
   10666 	}
   10667 	if (check_version) {
   10668 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10669 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10670 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10671 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10672 			build = nvm_data & NVM_BUILD_MASK;
   10673 			have_build = true;
   10674 		} else
   10675 			minor = nvm_data & 0x00ff;
   10676 
    10677 		/* The minor number is BCD-coded; convert it to decimal */
   10678 		minor = (minor / 16) * 10 + (minor % 16);
   10679 		sc->sc_nvm_ver_major = major;
   10680 		sc->sc_nvm_ver_minor = minor;
   10681 
   10682 printver:
   10683 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10684 		    sc->sc_nvm_ver_minor);
   10685 		if (have_build) {
   10686 			sc->sc_nvm_ver_build = build;
   10687 			aprint_verbose(".%d", build);
   10688 		}
   10689 	}
   10690 	if (check_optionrom) {
   10691 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10692 		/* Option ROM Version */
   10693 		if ((off != 0x0000) && (off != 0xffff)) {
   10694 			off += NVM_COMBO_VER_OFF;
   10695 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10696 			wm_nvm_read(sc, off, 1, &uid0);
   10697 			if ((uid0 != 0) && (uid0 != 0xffff)
   10698 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10699 				/* 16bits */
   10700 				major = uid0 >> 8;
   10701 				build = (uid0 << 8) | (uid1 >> 8);
   10702 				patch = uid1 & 0x00ff;
   10703 				aprint_verbose(", option ROM Version %d.%d.%d",
   10704 				    major, build, patch);
   10705 			}
   10706 		}
   10707 	}
   10708 
   10709 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10710 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10711 }
   10712 
   10713 /*
   10714  * wm_nvm_read:
   10715  *
   10716  *	Read data from the serial EEPROM.
   10717  */
   10718 static int
   10719 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10720 {
   10721 	int rv;
   10722 
   10723 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10724 		return 1;
   10725 
   10726 	if (wm_nvm_acquire(sc))
   10727 		return 1;
   10728 
   10729 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10730 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10731 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   10732 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   10733 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   10734 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   10735 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   10736 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   10737 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   10738 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   10739 	else
   10740 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   10741 
   10742 	wm_nvm_release(sc);
   10743 	return rv;
   10744 }
   10745 
   10746 /*
   10747  * Hardware semaphores.
    10748  * Very complex...
   10749  */
   10750 
   10751 static int
   10752 wm_get_swsm_semaphore(struct wm_softc *sc)
   10753 {
   10754 	int32_t timeout;
   10755 	uint32_t swsm;
   10756 
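	/*
	 * SWSM carries two bits: SMBI arbitrates between software
	 * agents, and SWESMBI arbitrates between software and firmware.
	 * SWESMBI is taken by setting it and reading it back; the write
	 * only sticks if the semaphore was free.
	 */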
   10757 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10758 		/* Get the SW semaphore. */
   10759 		timeout = sc->sc_nvm_wordsize + 1;
   10760 		while (timeout) {
   10761 			swsm = CSR_READ(sc, WMREG_SWSM);
   10762 
   10763 			if ((swsm & SWSM_SMBI) == 0)
   10764 				break;
   10765 
   10766 			delay(50);
   10767 			timeout--;
   10768 		}
   10769 
   10770 		if (timeout == 0) {
   10771 			aprint_error_dev(sc->sc_dev,
   10772 			    "could not acquire SWSM SMBI\n");
   10773 			return 1;
   10774 		}
   10775 	}
   10776 
   10777 	/* Get the FW semaphore. */
   10778 	timeout = sc->sc_nvm_wordsize + 1;
   10779 	while (timeout) {
   10780 		swsm = CSR_READ(sc, WMREG_SWSM);
   10781 		swsm |= SWSM_SWESMBI;
   10782 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   10783 		/* If we managed to set the bit we got the semaphore. */
   10784 		swsm = CSR_READ(sc, WMREG_SWSM);
   10785 		if (swsm & SWSM_SWESMBI)
   10786 			break;
   10787 
   10788 		delay(50);
   10789 		timeout--;
   10790 	}
   10791 
   10792 	if (timeout == 0) {
   10793 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   10794 		/* Release semaphores */
   10795 		wm_put_swsm_semaphore(sc);
   10796 		return 1;
   10797 	}
   10798 	return 0;
   10799 }
   10800 
   10801 static void
   10802 wm_put_swsm_semaphore(struct wm_softc *sc)
   10803 {
   10804 	uint32_t swsm;
   10805 
   10806 	swsm = CSR_READ(sc, WMREG_SWSM);
   10807 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   10808 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   10809 }
   10810 
   10811 static int
   10812 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10813 {
   10814 	uint32_t swfw_sync;
   10815 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   10816 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    10817 	int timeout;
   10818 
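	/*
	 * Each resource has a software and a firmware bit in SW_FW_SYNC.
	 * We may claim the software bit only while both are clear, with
	 * the register update protected by the SWSM semaphore.  Retry
	 * every 5ms, for up to a second.
	 */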
   10819 	for (timeout = 0; timeout < 200; timeout++) {
   10820 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10821 			if (wm_get_swsm_semaphore(sc)) {
   10822 				aprint_error_dev(sc->sc_dev,
   10823 				    "%s: failed to get semaphore\n",
   10824 				    __func__);
   10825 				return 1;
   10826 			}
   10827 		}
   10828 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10829 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   10830 			swfw_sync |= swmask;
   10831 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10832 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   10833 				wm_put_swsm_semaphore(sc);
   10834 			return 0;
   10835 		}
   10836 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   10837 			wm_put_swsm_semaphore(sc);
   10838 		delay(5000);
   10839 	}
   10840 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   10841 	    device_xname(sc->sc_dev), mask, swfw_sync);
   10842 	return 1;
   10843 }
   10844 
   10845 static void
   10846 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10847 {
   10848 	uint32_t swfw_sync;
   10849 
   10850 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10851 		while (wm_get_swsm_semaphore(sc) != 0)
   10852 			continue;
   10853 	}
   10854 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10855 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   10856 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10857 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   10858 		wm_put_swsm_semaphore(sc);
   10859 }
   10860 
   10861 static int
   10862 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   10863 {
   10864 	uint32_t ext_ctrl;
    10865 	int timeout;
   10866 
   10867 	for (timeout = 0; timeout < 200; timeout++) {
   10868 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10869 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10870 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10871 
   10872 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10873 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   10874 			return 0;
   10875 		delay(5000);
   10876 	}
   10877 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   10878 	    device_xname(sc->sc_dev), ext_ctrl);
   10879 	return 1;
   10880 }
   10881 
   10882 static void
   10883 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   10884 {
   10885 	uint32_t ext_ctrl;
   10886 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10887 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10888 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10889 }
   10890 
   10891 static int
   10892 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   10893 {
   10894 	int i = 0;
   10895 	uint32_t reg;
   10896 
   10897 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10898 	do {
   10899 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   10900 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   10901 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10902 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   10903 			break;
   10904 		delay(2*1000);
   10905 		i++;
   10906 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   10907 
   10908 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   10909 		wm_put_hw_semaphore_82573(sc);
   10910 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   10911 		    device_xname(sc->sc_dev));
   10912 		return -1;
   10913 	}
   10914 
   10915 	return 0;
   10916 }
   10917 
   10918 static void
   10919 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   10920 {
   10921 	uint32_t reg;
   10922 
   10923 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10924 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10925 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   10926 }
   10927 
   10928 /*
   10929  * Management mode and power management related subroutines.
   10930  * BMC, AMT, suspend/resume and EEE.
   10931  */
   10932 
   10933 #ifdef WM_WOL
   10934 static int
   10935 wm_check_mng_mode(struct wm_softc *sc)
   10936 {
   10937 	int rv;
   10938 
   10939 	switch (sc->sc_type) {
   10940 	case WM_T_ICH8:
   10941 	case WM_T_ICH9:
   10942 	case WM_T_ICH10:
   10943 	case WM_T_PCH:
   10944 	case WM_T_PCH2:
   10945 	case WM_T_PCH_LPT:
   10946 		rv = wm_check_mng_mode_ich8lan(sc);
   10947 		break;
   10948 	case WM_T_82574:
   10949 	case WM_T_82583:
   10950 		rv = wm_check_mng_mode_82574(sc);
   10951 		break;
   10952 	case WM_T_82571:
   10953 	case WM_T_82572:
   10954 	case WM_T_82573:
   10955 	case WM_T_80003:
   10956 		rv = wm_check_mng_mode_generic(sc);
   10957 		break;
   10958 	default:
    10959 		/* nothing to do */
   10960 		rv = 0;
   10961 		break;
   10962 	}
   10963 
   10964 	return rv;
   10965 }
   10966 
   10967 static int
   10968 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   10969 {
   10970 	uint32_t fwsm;
   10971 
   10972 	fwsm = CSR_READ(sc, WMREG_FWSM);
   10973 
   10974 	if (((fwsm & FWSM_FW_VALID) != 0) &&
   10975 	    (fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   10976 		return 1;
   10977 
   10978 	return 0;
   10979 }
   10980 
   10981 static int
   10982 wm_check_mng_mode_82574(struct wm_softc *sc)
   10983 {
   10984 	uint16_t data;
   10985 
   10986 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   10987 
   10988 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   10989 		return 1;
   10990 
   10991 	return 0;
   10992 }
   10993 
   10994 static int
   10995 wm_check_mng_mode_generic(struct wm_softc *sc)
   10996 {
   10997 	uint32_t fwsm;
   10998 
   10999 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11000 
   11001 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   11002 		return 1;
   11003 
   11004 	return 0;
   11005 }
   11006 #endif /* WM_WOL */
   11007 
   11008 static int
   11009 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11010 {
   11011 	uint32_t manc, fwsm, factps;
   11012 
   11013 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11014 		return 0;
   11015 
   11016 	manc = CSR_READ(sc, WMREG_MANC);
   11017 
   11018 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11019 		device_xname(sc->sc_dev), manc));
   11020 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11021 		return 0;
   11022 
   11023 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11024 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11025 		factps = CSR_READ(sc, WMREG_FACTPS);
   11026 		if (((factps & FACTPS_MNGCG) == 0)
   11027 		    && ((fwsm & FWSM_MODE_MASK)
   11028 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   11029 			return 1;
    11030 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11031 		uint16_t data;
   11032 
   11033 		factps = CSR_READ(sc, WMREG_FACTPS);
   11034 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11035 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11036 			device_xname(sc->sc_dev), factps, data));
   11037 		if (((factps & FACTPS_MNGCG) == 0)
   11038 		    && ((data & NVM_CFG2_MNGM_MASK)
   11039 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11040 			return 1;
   11041 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11042 	    && ((manc & MANC_ASF_EN) == 0))
   11043 		return 1;
   11044 
   11045 	return 0;
   11046 }
   11047 
   11048 static int
   11049 wm_check_reset_block(struct wm_softc *sc)
   11050 {
   11051 	bool blocked = false;
   11052 	uint32_t reg;
   11053 	int i = 0;
   11054 
   11055 	switch (sc->sc_type) {
   11056 	case WM_T_ICH8:
   11057 	case WM_T_ICH9:
   11058 	case WM_T_ICH10:
   11059 	case WM_T_PCH:
   11060 	case WM_T_PCH2:
   11061 	case WM_T_PCH_LPT:
   11062 		do {
   11063 			reg = CSR_READ(sc, WMREG_FWSM);
   11064 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11065 				blocked = true;
   11066 				delay(10*1000);
   11067 				continue;
   11068 			}
   11069 			blocked = false;
   11070 		} while (blocked && (i++ < 10));
   11071 		return blocked ? 1 : 0;
   11073 	case WM_T_82571:
   11074 	case WM_T_82572:
   11075 	case WM_T_82573:
   11076 	case WM_T_82574:
   11077 	case WM_T_82583:
   11078 	case WM_T_80003:
   11079 		reg = CSR_READ(sc, WMREG_MANC);
   11080 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11081 			return -1;
   11082 		else
   11083 			return 0;
   11085 	default:
   11086 		/* no problem */
   11087 		break;
   11088 	}
   11089 
   11090 	return 0;
   11091 }
   11092 
   11093 static void
   11094 wm_get_hw_control(struct wm_softc *sc)
   11095 {
   11096 	uint32_t reg;
   11097 
   11098 	switch (sc->sc_type) {
   11099 	case WM_T_82573:
   11100 		reg = CSR_READ(sc, WMREG_SWSM);
   11101 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11102 		break;
   11103 	case WM_T_82571:
   11104 	case WM_T_82572:
   11105 	case WM_T_82574:
   11106 	case WM_T_82583:
   11107 	case WM_T_80003:
   11108 	case WM_T_ICH8:
   11109 	case WM_T_ICH9:
   11110 	case WM_T_ICH10:
   11111 	case WM_T_PCH:
   11112 	case WM_T_PCH2:
   11113 	case WM_T_PCH_LPT:
   11114 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11115 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11116 		break;
   11117 	default:
   11118 		break;
   11119 	}
   11120 }
   11121 
   11122 static void
   11123 wm_release_hw_control(struct wm_softc *sc)
   11124 {
   11125 	uint32_t reg;
   11126 
   11127 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11128 		return;
   11129 
   11130 	if (sc->sc_type == WM_T_82573) {
   11131 		reg = CSR_READ(sc, WMREG_SWSM);
   11132 		reg &= ~SWSM_DRV_LOAD;
    11133 		CSR_WRITE(sc, WMREG_SWSM, reg);
   11134 	} else {
   11135 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11136 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11137 	}
   11138 }
   11139 
   11140 static void
   11141 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   11142 {
   11143 	uint32_t reg;
   11144 
   11145 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11146 
   11147 	if (on != 0)
   11148 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11149 	else
   11150 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11151 
   11152 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11153 }
   11154 
   11155 static void
   11156 wm_smbustopci(struct wm_softc *sc)
   11157 {
   11158 	uint32_t fwsm;
   11159 
   11160 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11161 	if (((fwsm & FWSM_FW_VALID) == 0)
   11162 	    && ((wm_check_reset_block(sc) == 0))) {
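		/*
		 * Pulse LANPHYPC_OVERRIDE with LANPHYPC_VALUE low; this
		 * should force the PHY from SMBus mode back to PCIe mode
		 * so that it answers MDIO accesses again.
		 */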
   11163 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11164 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11165 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11166 		CSR_WRITE_FLUSH(sc);
   11167 		delay(10);
   11168 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11169 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11170 		CSR_WRITE_FLUSH(sc);
   11171 		delay(50*1000);
   11172 
   11173 		/*
   11174 		 * Gate automatic PHY configuration by hardware on non-managed
   11175 		 * 82579
   11176 		 */
   11177 		if (sc->sc_type == WM_T_PCH2)
   11178 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   11179 	}
   11180 }
   11181 
   11182 static void
   11183 wm_init_manageability(struct wm_softc *sc)
   11184 {
   11185 
   11186 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11187 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11188 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11189 
   11190 		/* Disable hardware interception of ARP */
   11191 		manc &= ~MANC_ARP_EN;
   11192 
   11193 		/* Enable receiving management packets to the host */
   11194 		if (sc->sc_type >= WM_T_82571) {
   11195 			manc |= MANC_EN_MNG2HOST;
    11196 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11197 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11198 		}
   11199 
   11200 		CSR_WRITE(sc, WMREG_MANC, manc);
   11201 	}
   11202 }
   11203 
   11204 static void
   11205 wm_release_manageability(struct wm_softc *sc)
   11206 {
   11207 
   11208 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11209 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11210 
   11211 		manc |= MANC_ARP_EN;
   11212 		if (sc->sc_type >= WM_T_82571)
   11213 			manc &= ~MANC_EN_MNG2HOST;
   11214 
   11215 		CSR_WRITE(sc, WMREG_MANC, manc);
   11216 	}
   11217 }
   11218 
   11219 static void
   11220 wm_get_wakeup(struct wm_softc *sc)
   11221 {
   11222 
   11223 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11224 	switch (sc->sc_type) {
   11225 	case WM_T_82573:
   11226 	case WM_T_82583:
   11227 		sc->sc_flags |= WM_F_HAS_AMT;
   11228 		/* FALLTHROUGH */
   11229 	case WM_T_80003:
   11230 	case WM_T_82541:
   11231 	case WM_T_82547:
   11232 	case WM_T_82571:
   11233 	case WM_T_82572:
   11234 	case WM_T_82574:
   11235 	case WM_T_82575:
   11236 	case WM_T_82576:
   11237 	case WM_T_82580:
   11238 	case WM_T_I350:
   11239 	case WM_T_I354:
   11240 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   11241 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11242 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11243 		break;
   11244 	case WM_T_ICH8:
   11245 	case WM_T_ICH9:
   11246 	case WM_T_ICH10:
   11247 	case WM_T_PCH:
   11248 	case WM_T_PCH2:
   11249 	case WM_T_PCH_LPT:
   11250 		sc->sc_flags |= WM_F_HAS_AMT;
   11251 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11252 		break;
   11253 	default:
   11254 		break;
   11255 	}
   11256 
   11257 	/* 1: HAS_MANAGE */
   11258 	if (wm_enable_mng_pass_thru(sc) != 0)
   11259 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11260 
   11261 #ifdef WM_DEBUG
   11262 	printf("\n");
   11263 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11264 		printf("HAS_AMT,");
   11265 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11266 		printf("ARC_SUBSYS_VALID,");
   11267 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11268 		printf("ASF_FIRMWARE_PRES,");
   11269 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11270 		printf("HAS_MANAGE,");
   11271 	printf("\n");
   11272 #endif
   11273 	/*
    11274 	 * Note that the WOL flags are set after the EEPROM stuff has
    11275 	 * been reset.
   11276 	 */
   11277 }
   11278 
   11279 #ifdef WM_WOL
   11280 /* WOL in the newer chipset interfaces (pchlan) */
   11281 static void
   11282 wm_enable_phy_wakeup(struct wm_softc *sc)
   11283 {
   11284 #if 0
   11285 	uint16_t preg;
   11286 
   11287 	/* Copy MAC RARs to PHY RARs */
   11288 
   11289 	/* Copy MAC MTA to PHY MTA */
   11290 
   11291 	/* Configure PHY Rx Control register */
   11292 
   11293 	/* Enable PHY wakeup in MAC register */
   11294 
   11295 	/* Configure and enable PHY wakeup in PHY registers */
   11296 
   11297 	/* Activate PHY wakeup */
   11298 
   11299 	/* XXX */
   11300 #endif
   11301 }
   11302 
   11303 /* Power down workaround on D3 */
   11304 static void
   11305 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11306 {
   11307 	uint32_t reg;
   11308 	int i;
   11309 
   11310 	for (i = 0; i < 2; i++) {
   11311 		/* Disable link */
   11312 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11313 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11314 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11315 
   11316 		/*
   11317 		 * Call gig speed drop workaround on Gig disable before
   11318 		 * accessing any PHY registers
   11319 		 */
   11320 		if (sc->sc_type == WM_T_ICH8)
   11321 			wm_gig_downshift_workaround_ich8lan(sc);
   11322 
   11323 		/* Write VR power-down enable */
   11324 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11325 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11326 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11327 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11328 
   11329 		/* Read it back and test */
   11330 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11331 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11332 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11333 			break;
   11334 
   11335 		/* Issue PHY reset and repeat at most one more time */
   11336 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11337 	}
   11338 }
   11339 
   11340 static void
   11341 wm_enable_wakeup(struct wm_softc *sc)
   11342 {
   11343 	uint32_t reg, pmreg;
   11344 	pcireg_t pmode;
   11345 
   11346 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11347 		&pmreg, NULL) == 0)
   11348 		return;
   11349 
   11350 	/* Advertise the wakeup capability */
   11351 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11352 	    | CTRL_SWDPIN(3));
   11353 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11354 
   11355 	/* ICH workaround */
   11356 	switch (sc->sc_type) {
   11357 	case WM_T_ICH8:
   11358 	case WM_T_ICH9:
   11359 	case WM_T_ICH10:
   11360 	case WM_T_PCH:
   11361 	case WM_T_PCH2:
   11362 	case WM_T_PCH_LPT:
   11363 		/* Disable gig during WOL */
   11364 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11365 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11366 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11367 		if (sc->sc_type == WM_T_PCH)
   11368 			wm_gmii_reset(sc);
   11369 
   11370 		/* Power down workaround */
   11371 		if (sc->sc_phytype == WMPHY_82577) {
   11372 			struct mii_softc *child;
   11373 
   11374 			/* Assume that the PHY is copper */
   11375 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11376 			if (child->mii_mpd_rev <= 2)
   11377 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11378 				    (768 << 5) | 25, 0x0444); /* magic num */
   11379 		}
   11380 		break;
   11381 	default:
   11382 		break;
   11383 	}
   11384 
   11385 	/* Keep the laser running on fiber adapters */
   11386 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11387 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11388 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11389 		reg |= CTRL_EXT_SWDPIN(3);
   11390 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11391 	}
   11392 
   11393 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11394 #if 0	/* for the multicast packet */
   11395 	reg |= WUFC_MC;
   11396 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11397 #endif
   11398 
   11399 	if (sc->sc_type == WM_T_PCH) {
   11400 		wm_enable_phy_wakeup(sc);
   11401 	} else {
   11402 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11403 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11404 	}
   11405 
   11406 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11407 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11408 		|| (sc->sc_type == WM_T_PCH2))
   11409 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11410 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11411 
   11412 	/* Request PME */
   11413 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11414 #if 0
   11415 	/* Disable WOL */
   11416 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11417 #else
   11418 	/* For WOL */
   11419 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11420 #endif
   11421 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11422 }
   11423 #endif /* WM_WOL */
   11424 
   11425 /* LPLU */
   11426 
   11427 static void
   11428 wm_lplu_d0_disable(struct wm_softc *sc)
   11429 {
   11430 	uint32_t reg;
   11431 
   11432 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11433 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   11434 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11435 }
   11436 
   11437 static void
   11438 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   11439 {
   11440 	uint32_t reg;
   11441 
   11442 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   11443 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   11444 	reg |= HV_OEM_BITS_ANEGNOW;
   11445 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   11446 }
   11447 
   11448 /* EEE */
   11449 
   11450 static void
   11451 wm_set_eee_i350(struct wm_softc *sc)
   11452 {
   11453 	uint32_t ipcnfg, eeer;
   11454 
   11455 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11456 	eeer = CSR_READ(sc, WMREG_EEER);
   11457 
   11458 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11459 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11460 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11461 		    | EEER_LPI_FC);
   11462 	} else {
   11463 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11464 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11465 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11466 		    | EEER_LPI_FC);
   11467 	}
   11468 
   11469 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11470 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11471 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11472 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11473 }
   11474 
   11475 /*
   11476  * Workarounds (mainly PHY related).
   11477  * Basically, PHY's workarounds are in the PHY drivers.
   11478  */
   11479 
   11480 /* Work-around for 82566 Kumeran PCS lock loss */
   11481 static void
   11482 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11483 {
   11484 #if 0
   11485 	int miistatus, active, i;
   11486 	int reg;
   11487 
   11488 	miistatus = sc->sc_mii.mii_media_status;
   11489 
   11490 	/* If the link is not up, do nothing */
   11491 	if ((miistatus & IFM_ACTIVE) == 0)
   11492 		return;
   11493 
   11494 	active = sc->sc_mii.mii_media_active;
   11495 
   11496 	/* Nothing to do if the link is other than 1Gbps */
   11497 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11498 		return;
   11499 
   11500 	for (i = 0; i < 10; i++) {
   11501 		/* read twice */
   11502 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11503 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11504 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11505 			goto out;	/* GOOD! */
   11506 
   11507 		/* Reset the PHY */
   11508 		wm_gmii_reset(sc);
   11509 		delay(5*1000);
   11510 	}
   11511 
   11512 	/* Disable GigE link negotiation */
   11513 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11514 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11515 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11516 
   11517 	/*
   11518 	 * Call gig speed drop workaround on Gig disable before accessing
   11519 	 * any PHY registers.
   11520 	 */
   11521 	wm_gig_downshift_workaround_ich8lan(sc);
   11522 
   11523 out:
   11524 	return;
   11525 #endif
   11526 }
   11527 
    11528 /* Workaround for the "WOL from S5 stops working" erratum */
   11529 static void
   11530 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11531 {
   11532 	uint16_t kmrn_reg;
   11533 
   11534 	/* Only for igp3 */
   11535 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11536 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11537 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11538 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11539 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11540 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11541 	}
   11542 }
   11543 

/*
 * Workaround for PCH PHYs
 * XXX should be moved to a new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* XXX not yet: (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* XXX not yet: (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is a link, so that K1 is disabled if the link is at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

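/*
 * Workaround for PCH2 (82579) PHYs: only force slow MDIO mode.
 * XXX should be moved to a new PHY driver, like the HV one above?
 */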
static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

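/*
 * K1 Si workaround: the MAC can stall if the K1 power saving state is
 * enabled while the link is up at 1Gbps, so request K1 off whenever
 * there is a link and fall back to the NVM default otherwise.
 */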
static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

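/* Put the HV (PCH) PHY's MDIO interface into slow mode */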
static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

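/*
 * Configure the K1 power state via the Kumeran interface.  After
 * updating the K1 config register, the MAC speed is briefly forced
 * (FRCSPD/SPD_BYPS) and then restored, apparently so that the new
 * setting takes effect.
 */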
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case - the 82575 needs manual init after reset ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * The setup is the same as mentioned in the FreeBSD driver for
	 * the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

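/*
 * Set the MDICNFG destination (external MDIO) and shared-MDIO bits
 * on the 82580 from the port's CFG3 NVM word; only needed for SGMII.
 */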
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/*
	 * Get the Power Management cap offset first; bail out before
	 * touching any registers if it is missing.
	 */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

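		/*
		 * Bounce the device through D3hot and back to D0,
		 * apparently so that the PHY PLL restarts with the
		 * temporary autoload word programmed above.
		 */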
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}
