      1 /*	$NetBSD: if_wm.c,v 1.334 2015/06/12 04:40:28 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
      76  *	- EEE (Energy Efficient Ethernet)
     77  *	- MSI/MSI-X
     78  *	- Virtual Function
      79  *	- Set LED correctly (based on the EEPROM contents)
     80  *	- Rework how parameters are loaded from the EEPROM.
     81  */
     82 
     83 #include <sys/cdefs.h>
     84 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.334 2015/06/12 04:40:28 msaitoh Exp $");
     85 
     86 #ifdef _KERNEL_OPT
     87 #include "opt_net_mpsafe.h"
     88 #endif
     89 
     90 #include <sys/param.h>
     91 #include <sys/systm.h>
     92 #include <sys/callout.h>
     93 #include <sys/mbuf.h>
     94 #include <sys/malloc.h>
     95 #include <sys/kernel.h>
     96 #include <sys/socket.h>
     97 #include <sys/ioctl.h>
     98 #include <sys/errno.h>
     99 #include <sys/device.h>
    100 #include <sys/queue.h>
    101 #include <sys/syslog.h>
    102 
    103 #include <sys/rndsource.h>
    104 
    105 #include <net/if.h>
    106 #include <net/if_dl.h>
    107 #include <net/if_media.h>
    108 #include <net/if_ether.h>
    109 
    110 #include <net/bpf.h>
    111 
    112 #include <netinet/in.h>			/* XXX for struct ip */
    113 #include <netinet/in_systm.h>		/* XXX for struct ip */
    114 #include <netinet/ip.h>			/* XXX for struct ip */
    115 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    116 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    117 
    118 #include <sys/bus.h>
    119 #include <sys/intr.h>
    120 #include <machine/endian.h>
    121 
    122 #include <dev/mii/mii.h>
    123 #include <dev/mii/miivar.h>
    124 #include <dev/mii/miidevs.h>
    125 #include <dev/mii/mii_bitbang.h>
    126 #include <dev/mii/ikphyreg.h>
    127 #include <dev/mii/igphyreg.h>
    128 #include <dev/mii/igphyvar.h>
    129 #include <dev/mii/inbmphyreg.h>
    130 
    131 #include <dev/pci/pcireg.h>
    132 #include <dev/pci/pcivar.h>
    133 #include <dev/pci/pcidevs.h>
    134 
    135 #include <dev/pci/if_wmreg.h>
    136 #include <dev/pci/if_wmvar.h>
    137 
    138 #ifdef WM_DEBUG
    139 #define	WM_DEBUG_LINK		0x01
    140 #define	WM_DEBUG_TX		0x02
    141 #define	WM_DEBUG_RX		0x04
    142 #define	WM_DEBUG_GMII		0x08
    143 #define	WM_DEBUG_MANAGE		0x10
    144 #define	WM_DEBUG_NVM		0x20
    145 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    146     | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
    147 
     148 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    149 #else
    150 #define	DPRINTF(x, y)	/* nothing */
    151 #endif /* WM_DEBUG */
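         /*
          * Debug output is gated by the bits in wm_debug.  A typical
          * call site (illustrative sketch only; "xname" is a stand-in)
          * looks like:
          *
          *	DPRINTF(WM_DEBUG_LINK, ("%s: link intr\n", xname));
          *
          * Note the doubled parentheses: the macro hands its second
          * argument to printf verbatim, so the whole argument list must
          * already be parenthesized.
          */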
    152 
    153 #ifdef NET_MPSAFE
    154 #define WM_MPSAFE	1
    155 #endif
    156 
    157 /*
    158  * Transmit descriptor list size.  Due to errata, we can only have
    159  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    160  * on >= 82544.  We tell the upper layers that they can queue a lot
    161  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    162  * of them at a time.
    163  *
    164  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    165  * chains containing many small mbufs have been observed in zero-copy
    166  * situations with jumbo frames.
    167  */
    168 #define	WM_NTXSEGS		256
    169 #define	WM_IFQUEUELEN		256
    170 #define	WM_TXQUEUELEN_MAX	64
    171 #define	WM_TXQUEUELEN_MAX_82547	16
    172 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
    173 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
    174 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
    175 #define	WM_NTXDESC_82542	256
    176 #define	WM_NTXDESC_82544	4096
    177 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
    178 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
    179 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
    180 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
    181 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
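         /*
          * Both ring sizes are powers of two, so the "& mask" in
          * WM_NEXTTX()/WM_NEXTTXS() wraps the index without a branch.
          * Worked example: with WM_NTXDESC(sc) == 4096 the mask is
          * 0xfff, so WM_NEXTTX(sc, 4095) == (4096 & 0xfff) == 0.
          */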
    182 
    183 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    184 
    185 /*
    186  * Receive descriptor list size.  We have one Rx buffer for normal
    187  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    188  * packet.  We allocate 256 receive descriptors, each with a 2k
    189  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    190  */
    191 #define	WM_NRXDESC		256
    192 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    193 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    194 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
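         /*
          * The "5 Rx buffers" figure above is simple arithmetic: a ~9k
          * jumbo frame split across 2048-byte (MCLBYTES) clusters needs
          * ceil(9018 / 2048) = 5 descriptors, and 256 descriptors / 5
          * leaves room for about 50 in-flight jumbo packets.
          */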
    195 
    196 /*
    197  * Control structures are DMA'd to the i82542 chip.  We allocate them in
    198  * a single clump that maps to a single DMA segment to make several things
    199  * easier.
    200  */
    201 struct wm_control_data_82544 {
    202 	/*
    203 	 * The receive descriptors.
    204 	 */
    205 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
    206 
    207 	/*
    208 	 * The transmit descriptors.  Put these at the end, because
    209 	 * we might use a smaller number of them.
    210 	 */
    211 	union {
    212 		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
    213 		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
    214 	} wdc_u;
    215 };
    216 
    217 struct wm_control_data_82542 {
    218 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
    219 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
    220 };
    221 
    222 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
    223 #define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
    224 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
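         /*
          * Because the whole clump occupies a single DMA segment, a
          * descriptor's bus address is just the segment base plus its
          * offsetof() within the clump; e.g. WM_CDRXOFF(3) is the byte
          * offset of the fourth Rx descriptor, which WM_CDRXADDR()
          * below turns into a DMA address by simple addition.
          */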
    225 
    226 /*
    227  * Software state for transmit jobs.
    228  */
    229 struct wm_txsoft {
    230 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    231 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    232 	int txs_firstdesc;		/* first descriptor in packet */
    233 	int txs_lastdesc;		/* last descriptor in packet */
    234 	int txs_ndesc;			/* # of descriptors used */
    235 };
    236 
    237 /*
    238  * Software state for receive buffers.  Each descriptor gets a
    239  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    240  * more than one buffer, we chain them together.
    241  */
    242 struct wm_rxsoft {
    243 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    244 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    245 };
    246 
    247 #define WM_LINKUP_TIMEOUT	50
    248 
    249 static uint16_t swfwphysem[] = {
    250 	SWFW_PHY0_SM,
    251 	SWFW_PHY1_SM,
    252 	SWFW_PHY2_SM,
    253 	SWFW_PHY3_SM
    254 };
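         /*
          * swfwphysem[] is indexed by the chip function number
          * (sc_funcid below) to select the software/firmware semaphore
          * bit protecting that function's PHY.
          */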
    255 
    256 static const uint32_t wm_82580_rxpbs_table[] = {
    257 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    258 };
    259 
    260 /*
    261  * Software state per device.
    262  */
    263 struct wm_softc {
    264 	device_t sc_dev;		/* generic device information */
    265 	bus_space_tag_t sc_st;		/* bus space tag */
    266 	bus_space_handle_t sc_sh;	/* bus space handle */
    267 	bus_size_t sc_ss;		/* bus space size */
    268 	bus_space_tag_t sc_iot;		/* I/O space tag */
    269 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    270 	bus_size_t sc_ios;		/* I/O space size */
    271 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    272 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    273 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    274 
    275 	struct ethercom sc_ethercom;	/* ethernet common data */
    276 	struct mii_data sc_mii;		/* MII/media information */
    277 
    278 	pci_chipset_tag_t sc_pc;
    279 	pcitag_t sc_pcitag;
    280 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    281 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    282 
    283 	uint16_t sc_pcidevid;		/* PCI device ID */
    284 	wm_chip_type sc_type;		/* MAC type */
    285 	int sc_rev;			/* MAC revision */
    286 	wm_phy_type sc_phytype;		/* PHY type */
     287 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
    288 #define	WM_MEDIATYPE_UNKNOWN		0x00
    289 #define	WM_MEDIATYPE_FIBER		0x01
    290 #define	WM_MEDIATYPE_COPPER		0x02
    291 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    292 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    293 	int sc_flags;			/* flags; see below */
    294 	int sc_if_flags;		/* last if_flags */
    295 	int sc_flowflags;		/* 802.3x flow control flags */
    296 	int sc_align_tweak;
    297 
    298 	void *sc_ih;			/* interrupt cookie */
    299 	callout_t sc_tick_ch;		/* tick callout */
    300 	bool sc_stopping;
    301 
    302 	int sc_nvm_ver_major;
    303 	int sc_nvm_ver_minor;
    304 	int sc_nvm_addrbits;		/* NVM address bits */
    305 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    306 	int sc_ich8_flash_base;
    307 	int sc_ich8_flash_bank_size;
    308 	int sc_nvm_k1_enabled;
    309 
    310 	/* Software state for the transmit and receive descriptors. */
    311 	int sc_txnum;			/* must be a power of two */
    312 	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
    313 	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
    314 
    315 	/* Control data structures. */
    316 	int sc_ntxdesc;			/* must be a power of two */
    317 	struct wm_control_data_82544 *sc_control_data;
    318 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
    319 	bus_dma_segment_t sc_cd_seg;	/* control data segment */
     320 	int sc_cd_rseg;			/* real number of control segments */
    321 	size_t sc_cd_size;		/* control data size */
    322 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
    323 #define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
    324 #define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
    325 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
    326 
    327 #ifdef WM_EVENT_COUNTERS
    328 	/* Event counters. */
    329 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
    330 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
    331 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
    332 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
    333 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
    334 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
    335 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    336 
    337 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
    338 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
    339 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
    340 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
    341 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
    342 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
    343 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
    344 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
    345 
    346 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    347 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
    348 
    349 	struct evcnt sc_ev_tu;		/* Tx underrun */
    350 
    351 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    352 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    353 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    354 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    355 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    356 #endif /* WM_EVENT_COUNTERS */
    357 
    358 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
    359 
    360 	int	sc_txfree;		/* number of free Tx descriptors */
    361 	int	sc_txnext;		/* next ready Tx descriptor */
    362 
    363 	int	sc_txsfree;		/* number of free Tx jobs */
    364 	int	sc_txsnext;		/* next free Tx job */
    365 	int	sc_txsdirty;		/* dirty Tx jobs */
    366 
    367 	/* These 5 variables are used only on the 82547. */
    368 	int	sc_txfifo_size;		/* Tx FIFO size */
    369 	int	sc_txfifo_head;		/* current head of FIFO */
    370 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
    371 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
    372 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    373 
    374 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
    375 
    376 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
    377 	int	sc_rxdiscard;
    378 	int	sc_rxlen;
    379 	struct mbuf *sc_rxhead;
    380 	struct mbuf *sc_rxtail;
    381 	struct mbuf **sc_rxtailp;
    382 
    383 	uint32_t sc_ctrl;		/* prototype CTRL register */
    384 #if 0
    385 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    386 #endif
    387 	uint32_t sc_icr;		/* prototype interrupt bits */
    388 	uint32_t sc_itr;		/* prototype intr throttling reg */
    389 	uint32_t sc_tctl;		/* prototype TCTL register */
    390 	uint32_t sc_rctl;		/* prototype RCTL register */
    391 	uint32_t sc_txcw;		/* prototype TXCW register */
    392 	uint32_t sc_tipg;		/* prototype TIPG register */
    393 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    394 	uint32_t sc_pba;		/* prototype PBA register */
    395 
    396 	int sc_tbi_linkup;		/* TBI link status */
    397 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    398 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    399 
    400 	int sc_mchash_type;		/* multicast filter offset */
    401 
    402 	krndsource_t rnd_source;	/* random source */
    403 
    404 	kmutex_t *sc_tx_lock;		/* lock for tx operations */
    405 	kmutex_t *sc_rx_lock;		/* lock for rx operations */
    406 };
    407 
     408 #define WM_TX_LOCK(_sc)		do { if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock); } while (0)
     409 #define WM_TX_UNLOCK(_sc)	do { if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock); } while (0)
     410 #define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
     411 #define WM_RX_LOCK(_sc)		do { if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock); } while (0)
     412 #define WM_RX_UNLOCK(_sc)	do { if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock); } while (0)
     413 #define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
    414 #define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
    415 #define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
    416 #define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
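         /*
          * Locking sketch (illustrative only): paths that touch both
          * rings, such as init and stop, take both locks in Tx-then-Rx
          * order via WM_BOTH_LOCK(), and WM_BOTH_UNLOCK() releases them
          * in the reverse order, keeping the lock ordering consistent.
          */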
    417 
    418 #ifdef WM_MPSAFE
    419 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    420 #else
    421 #define CALLOUT_FLAGS	0
    422 #endif
    423 
    424 #define	WM_RXCHAIN_RESET(sc)						\
    425 do {									\
    426 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
    427 	*(sc)->sc_rxtailp = NULL;					\
    428 	(sc)->sc_rxlen = 0;						\
    429 } while (/*CONSTCOND*/0)
    430 
    431 #define	WM_RXCHAIN_LINK(sc, m)						\
    432 do {									\
    433 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
    434 	(sc)->sc_rxtailp = &(m)->m_next;				\
    435 } while (/*CONSTCOND*/0)
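         /*
          * sc_rxtailp always points at the m_next slot of the last mbuf
          * in the chain (or at sc_rxhead while the chain is empty), so
          * WM_RXCHAIN_LINK() appends in O(1) without walking the list:
          *
          *	WM_RXCHAIN_RESET(sc);
          *	WM_RXCHAIN_LINK(sc, m1);	(sc_rxhead == m1)
          *	WM_RXCHAIN_LINK(sc, m2);	(m1->m_next == m2)
          */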
    436 
    437 #ifdef WM_EVENT_COUNTERS
    438 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    439 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    440 #else
    441 #define	WM_EVCNT_INCR(ev)	/* nothing */
    442 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    443 #endif
    444 
    445 #define	CSR_READ(sc, reg)						\
    446 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    447 #define	CSR_WRITE(sc, reg, val)						\
    448 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    449 #define	CSR_WRITE_FLUSH(sc)						\
    450 	(void) CSR_READ((sc), WMREG_STATUS)
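         /*
          * CSR_WRITE_FLUSH() relies on the rule that a read forces any
          * posted (buffered) PCI writes ahead of it to reach the device;
          * reading the side-effect-free STATUS register is a cheap way
          * to make sure a preceding CSR_WRITE() has actually landed.
          */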
    451 
    452 #define ICH8_FLASH_READ32(sc, reg) \
    453 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    454 #define ICH8_FLASH_WRITE32(sc, reg, data) \
    455 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    456 
    457 #define ICH8_FLASH_READ16(sc, reg) \
    458 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    459 #define ICH8_FLASH_WRITE16(sc, reg, data) \
    460 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    461 
    462 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
    463 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
    464 
    465 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
    466 #define	WM_CDTXADDR_HI(sc, x)						\
    467 	(sizeof(bus_addr_t) == 8 ?					\
    468 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
    469 
    470 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
    471 #define	WM_CDRXADDR_HI(sc, x)						\
    472 	(sizeof(bus_addr_t) == 8 ?					\
    473 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
    474 
    475 #define	WM_CDTXSYNC(sc, x, n, ops)					\
    476 do {									\
    477 	int __x, __n;							\
    478 									\
    479 	__x = (x);							\
    480 	__n = (n);							\
    481 									\
    482 	/* If it will wrap around, sync to the end of the ring. */	\
    483 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
    484 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
    485 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
    486 		    (WM_NTXDESC(sc) - __x), (ops));			\
    487 		__n -= (WM_NTXDESC(sc) - __x);				\
    488 		__x = 0;						\
    489 	}								\
    490 									\
    491 	/* Now sync whatever is left. */				\
    492 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
    493 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
    494 } while (/*CONSTCOND*/0)
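         /*
          * Worked example: syncing 4 descriptors starting at index 4094
          * of a 4096-entry ring issues two bus_dmamap_sync() calls, one
          * covering entries 4094-4095 at the end of the ring and one
          * covering entries 0-1 at the start, matching the hardware's
          * wrap-around.
          */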
    495 
    496 #define	WM_CDRXSYNC(sc, x, ops)						\
    497 do {									\
    498 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
    499 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
    500 } while (/*CONSTCOND*/0)
    501 
    502 #define	WM_INIT_RXDESC(sc, x)						\
    503 do {									\
    504 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
    505 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
    506 	struct mbuf *__m = __rxs->rxs_mbuf;				\
    507 									\
    508 	/*								\
    509 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
    510 	 * so that the payload after the Ethernet header is aligned	\
    511 	 * to a 4-byte boundary.					\
    512 	 *								\
    513 	 * XXX BRAINDAMAGE ALERT!					\
    514 	 * The stupid chip uses the same size for every buffer, which	\
    515 	 * is set in the Receive Control register.  We are using the 2K	\
    516 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
    517 	 * reason, we can't "scoot" packets longer than the standard	\
    518 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
    519 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
    520 	 * the upper layer copy the headers.				\
    521 	 */								\
    522 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
    523 									\
    524 	wm_set_dma_addr(&__rxd->wrx_addr,				\
    525 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
    526 	__rxd->wrx_len = 0;						\
    527 	__rxd->wrx_cksum = 0;						\
    528 	__rxd->wrx_status = 0;						\
    529 	__rxd->wrx_errors = 0;						\
    530 	__rxd->wrx_special = 0;						\
    531 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
    532 									\
    533 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
    534 } while (/*CONSTCOND*/0)
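         /*
          * The 2-byte scoot works because an Ethernet header is 14
          * bytes: placing it at offset 2 puts the IP header that
          * follows at offset 16, a 4-byte boundary, so strict-alignment
          * CPUs can dereference its fields directly.
          */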
    535 
    536 /*
    537  * Register read/write functions.
    538  * Other than CSR_{READ|WRITE}().
    539  */
    540 #if 0
    541 static inline uint32_t wm_io_read(struct wm_softc *, int);
    542 #endif
    543 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    544 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    545 	uint32_t, uint32_t);
    546 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    547 
    548 /*
    549  * Device driver interface functions and commonly used functions.
    550  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    551  */
    552 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    553 static int	wm_match(device_t, cfdata_t, void *);
    554 static void	wm_attach(device_t, device_t, void *);
    555 static int	wm_detach(device_t, int);
    556 static bool	wm_suspend(device_t, const pmf_qual_t *);
    557 static bool	wm_resume(device_t, const pmf_qual_t *);
    558 static void	wm_watchdog(struct ifnet *);
    559 static void	wm_tick(void *);
    560 static int	wm_ifflags_cb(struct ethercom *);
    561 static int	wm_ioctl(struct ifnet *, u_long, void *);
    562 /* MAC address related */
    563 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    564 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    565 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    566 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    567 static void	wm_set_filter(struct wm_softc *);
    568 /* Reset and init related */
    569 static void	wm_set_vlan(struct wm_softc *);
    570 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    571 static void	wm_get_auto_rd_done(struct wm_softc *);
    572 static void	wm_lan_init_done(struct wm_softc *);
    573 static void	wm_get_cfg_done(struct wm_softc *);
    574 static void	wm_initialize_hardware_bits(struct wm_softc *);
    575 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    576 static void	wm_reset(struct wm_softc *);
    577 static int	wm_add_rxbuf(struct wm_softc *, int);
    578 static void	wm_rxdrain(struct wm_softc *);
    579 static int	wm_init(struct ifnet *);
    580 static int	wm_init_locked(struct ifnet *);
    581 static void	wm_stop(struct ifnet *, int);
    582 static void	wm_stop_locked(struct ifnet *, int);
    583 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    584     uint32_t *, uint8_t *);
    585 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    586 static void	wm_82547_txfifo_stall(void *);
    587 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    588 /* Start */
    589 static void	wm_start(struct ifnet *);
    590 static void	wm_start_locked(struct ifnet *);
    591 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    592     uint32_t *, uint32_t *, bool *);
    593 static void	wm_nq_start(struct ifnet *);
    594 static void	wm_nq_start_locked(struct ifnet *);
    595 /* Interrupt */
    596 static void	wm_txintr(struct wm_softc *);
    597 static void	wm_rxintr(struct wm_softc *);
    598 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    599 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    600 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    601 static void	wm_linkintr(struct wm_softc *, uint32_t);
    602 static int	wm_intr(void *);
    603 
    604 /*
    605  * Media related.
    606  * GMII, SGMII, TBI, SERDES and SFP.
    607  */
    608 /* Common */
    609 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    610 /* GMII related */
    611 static void	wm_gmii_reset(struct wm_softc *);
    612 static int	wm_get_phy_id_82575(struct wm_softc *);
    613 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    614 static int	wm_gmii_mediachange(struct ifnet *);
    615 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    616 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    617 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    618 static int	wm_gmii_i82543_readreg(device_t, int, int);
    619 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    620 static int	wm_gmii_i82544_readreg(device_t, int, int);
    621 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    622 static int	wm_gmii_i80003_readreg(device_t, int, int);
    623 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    624 static int	wm_gmii_bm_readreg(device_t, int, int);
    625 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    626 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    627 static int	wm_gmii_hv_readreg(device_t, int, int);
    628 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    629 static int	wm_gmii_82580_readreg(device_t, int, int);
    630 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    631 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    632 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    633 static void	wm_gmii_statchg(struct ifnet *);
    634 static int	wm_kmrn_readreg(struct wm_softc *, int);
    635 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    636 /* SGMII */
    637 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    638 static int	wm_sgmii_readreg(device_t, int, int);
    639 static void	wm_sgmii_writereg(device_t, int, int, int);
    640 /* TBI related */
    641 static void	wm_tbi_mediainit(struct wm_softc *);
    642 static int	wm_tbi_mediachange(struct ifnet *);
    643 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    644 static int	wm_check_for_link(struct wm_softc *);
    645 static void	wm_tbi_tick(struct wm_softc *);
    646 /* SERDES related */
    647 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    648 static int	wm_serdes_mediachange(struct ifnet *);
    649 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    650 static void	wm_serdes_tick(struct wm_softc *);
    651 /* SFP related */
    652 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    653 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    654 
    655 /*
    656  * NVM related.
     657  * Microwire, SPI (with or without EERD) and Flash.
    658  */
    659 /* Misc functions */
    660 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    661 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    662 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    663 /* Microwire */
    664 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    665 /* SPI */
    666 static int	wm_nvm_ready_spi(struct wm_softc *);
    667 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     668 /* Used with EERD */
    669 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    670 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    671 /* Flash */
    672 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    673     unsigned int *);
    674 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    675 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    676 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    677 	uint16_t *);
    678 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    679 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    680 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    681 /* iNVM */
    682 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    683 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
     684 /* Locking, NVM type detection, checksum validation and read */
    685 static int	wm_nvm_acquire(struct wm_softc *);
    686 static void	wm_nvm_release(struct wm_softc *);
    687 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    688 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    689 static int	wm_nvm_validate_checksum(struct wm_softc *);
    690 static void	wm_nvm_version(struct wm_softc *);
    691 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    692 
    693 /*
    694  * Hardware semaphores.
     695  * Very complex...
    696  */
    697 static int	wm_get_swsm_semaphore(struct wm_softc *);
    698 static void	wm_put_swsm_semaphore(struct wm_softc *);
    699 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    700 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    701 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    702 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    703 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    704 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    705 
    706 /*
    707  * Management mode and power management related subroutines.
    708  * BMC, AMT, suspend/resume and EEE.
    709  */
    710 static int	wm_check_mng_mode(struct wm_softc *);
    711 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    712 static int	wm_check_mng_mode_82574(struct wm_softc *);
    713 static int	wm_check_mng_mode_generic(struct wm_softc *);
    714 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    715 static int	wm_check_reset_block(struct wm_softc *);
    716 static void	wm_get_hw_control(struct wm_softc *);
    717 static void	wm_release_hw_control(struct wm_softc *);
    718 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
    719 static void	wm_smbustopci(struct wm_softc *);
    720 static void	wm_init_manageability(struct wm_softc *);
    721 static void	wm_release_manageability(struct wm_softc *);
    722 static void	wm_get_wakeup(struct wm_softc *);
    723 #ifdef WM_WOL
    724 static void	wm_enable_phy_wakeup(struct wm_softc *);
    725 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    726 static void	wm_enable_wakeup(struct wm_softc *);
    727 #endif
    728 /* EEE */
    729 static void	wm_set_eee_i350(struct wm_softc *);
    730 
    731 /*
    732  * Workarounds (mainly PHY related).
    733  * Basically, PHY's workarounds are in the PHY drivers.
    734  */
    735 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    736 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    737 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    738 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    739 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    740 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    741 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    742 static void	wm_reset_init_script_82575(struct wm_softc *);
    743 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    744 static void	wm_pll_workaround_i210(struct wm_softc *);
    745 
    746 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    747     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    748 
    749 /*
    750  * Devices supported by this driver.
    751  */
    752 static const struct wm_product {
    753 	pci_vendor_id_t		wmp_vendor;
    754 	pci_product_id_t	wmp_product;
    755 	const char		*wmp_name;
    756 	wm_chip_type		wmp_type;
    757 	uint32_t		wmp_flags;
    758 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    759 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    760 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    761 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    762 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    763 } wm_products[] = {
    764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    765 	  "Intel i82542 1000BASE-X Ethernet",
    766 	  WM_T_82542_2_1,	WMP_F_FIBER },
    767 
    768 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    769 	  "Intel i82543GC 1000BASE-X Ethernet",
    770 	  WM_T_82543,		WMP_F_FIBER },
    771 
    772 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    773 	  "Intel i82543GC 1000BASE-T Ethernet",
    774 	  WM_T_82543,		WMP_F_COPPER },
    775 
    776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    777 	  "Intel i82544EI 1000BASE-T Ethernet",
    778 	  WM_T_82544,		WMP_F_COPPER },
    779 
    780 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    781 	  "Intel i82544EI 1000BASE-X Ethernet",
    782 	  WM_T_82544,		WMP_F_FIBER },
    783 
    784 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    785 	  "Intel i82544GC 1000BASE-T Ethernet",
    786 	  WM_T_82544,		WMP_F_COPPER },
    787 
    788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    789 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    790 	  WM_T_82544,		WMP_F_COPPER },
    791 
    792 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    793 	  "Intel i82540EM 1000BASE-T Ethernet",
    794 	  WM_T_82540,		WMP_F_COPPER },
    795 
    796 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    797 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    798 	  WM_T_82540,		WMP_F_COPPER },
    799 
    800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    801 	  "Intel i82540EP 1000BASE-T Ethernet",
    802 	  WM_T_82540,		WMP_F_COPPER },
    803 
    804 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    805 	  "Intel i82540EP 1000BASE-T Ethernet",
    806 	  WM_T_82540,		WMP_F_COPPER },
    807 
    808 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    809 	  "Intel i82540EP 1000BASE-T Ethernet",
    810 	  WM_T_82540,		WMP_F_COPPER },
    811 
    812 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    813 	  "Intel i82545EM 1000BASE-T Ethernet",
    814 	  WM_T_82545,		WMP_F_COPPER },
    815 
    816 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    817 	  "Intel i82545GM 1000BASE-T Ethernet",
    818 	  WM_T_82545_3,		WMP_F_COPPER },
    819 
    820 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    821 	  "Intel i82545GM 1000BASE-X Ethernet",
    822 	  WM_T_82545_3,		WMP_F_FIBER },
    823 
    824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    825 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    826 	  WM_T_82545_3,		WMP_F_SERDES },
    827 
    828 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    829 	  "Intel i82546EB 1000BASE-T Ethernet",
    830 	  WM_T_82546,		WMP_F_COPPER },
    831 
    832 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    833 	  "Intel i82546EB 1000BASE-T Ethernet",
    834 	  WM_T_82546,		WMP_F_COPPER },
    835 
    836 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    837 	  "Intel i82545EM 1000BASE-X Ethernet",
    838 	  WM_T_82545,		WMP_F_FIBER },
    839 
    840 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    841 	  "Intel i82546EB 1000BASE-X Ethernet",
    842 	  WM_T_82546,		WMP_F_FIBER },
    843 
    844 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    845 	  "Intel i82546GB 1000BASE-T Ethernet",
    846 	  WM_T_82546_3,		WMP_F_COPPER },
    847 
    848 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    849 	  "Intel i82546GB 1000BASE-X Ethernet",
    850 	  WM_T_82546_3,		WMP_F_FIBER },
    851 
    852 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    853 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    854 	  WM_T_82546_3,		WMP_F_SERDES },
    855 
    856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    857 	  "i82546GB quad-port Gigabit Ethernet",
    858 	  WM_T_82546_3,		WMP_F_COPPER },
    859 
    860 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    861 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    862 	  WM_T_82546_3,		WMP_F_COPPER },
    863 
    864 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    865 	  "Intel PRO/1000MT (82546GB)",
    866 	  WM_T_82546_3,		WMP_F_COPPER },
    867 
    868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    869 	  "Intel i82541EI 1000BASE-T Ethernet",
    870 	  WM_T_82541,		WMP_F_COPPER },
    871 
    872 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    873 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    874 	  WM_T_82541,		WMP_F_COPPER },
    875 
    876 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    877 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    878 	  WM_T_82541,		WMP_F_COPPER },
    879 
    880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    881 	  "Intel i82541ER 1000BASE-T Ethernet",
    882 	  WM_T_82541_2,		WMP_F_COPPER },
    883 
    884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    885 	  "Intel i82541GI 1000BASE-T Ethernet",
    886 	  WM_T_82541_2,		WMP_F_COPPER },
    887 
    888 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    889 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    890 	  WM_T_82541_2,		WMP_F_COPPER },
    891 
    892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    893 	  "Intel i82541PI 1000BASE-T Ethernet",
    894 	  WM_T_82541_2,		WMP_F_COPPER },
    895 
    896 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    897 	  "Intel i82547EI 1000BASE-T Ethernet",
    898 	  WM_T_82547,		WMP_F_COPPER },
    899 
    900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    901 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    902 	  WM_T_82547,		WMP_F_COPPER },
    903 
    904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    905 	  "Intel i82547GI 1000BASE-T Ethernet",
    906 	  WM_T_82547_2,		WMP_F_COPPER },
    907 
    908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    909 	  "Intel PRO/1000 PT (82571EB)",
    910 	  WM_T_82571,		WMP_F_COPPER },
    911 
    912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    913 	  "Intel PRO/1000 PF (82571EB)",
    914 	  WM_T_82571,		WMP_F_FIBER },
    915 
    916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    917 	  "Intel PRO/1000 PB (82571EB)",
    918 	  WM_T_82571,		WMP_F_SERDES },
    919 
    920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    921 	  "Intel PRO/1000 QT (82571EB)",
    922 	  WM_T_82571,		WMP_F_COPPER },
    923 
    924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    925 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    926 	  WM_T_82571,		WMP_F_COPPER, },
    927 
    928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
    929 	  "Intel Gigabit PT Quad Port Server ExpressModule",
    930 	  WM_T_82571,		WMP_F_COPPER, },
    931 
    932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
    933 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
    934 	  WM_T_82571,		WMP_F_SERDES, },
    935 
    936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
    937 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
    938 	  WM_T_82571,		WMP_F_SERDES, },
    939 
    940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
    941 	  "Intel 82571EB Quad 1000baseX Ethernet",
    942 	  WM_T_82571,		WMP_F_FIBER, },
    943 
    944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    945 	  "Intel i82572EI 1000baseT Ethernet",
    946 	  WM_T_82572,		WMP_F_COPPER },
    947 
    948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    949 	  "Intel i82572EI 1000baseX Ethernet",
    950 	  WM_T_82572,		WMP_F_FIBER },
    951 
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    953 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    954 	  WM_T_82572,		WMP_F_SERDES },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    957 	  "Intel i82572EI 1000baseT Ethernet",
    958 	  WM_T_82572,		WMP_F_COPPER },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    961 	  "Intel i82573E",
    962 	  WM_T_82573,		WMP_F_COPPER },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    965 	  "Intel i82573E IAMT",
    966 	  WM_T_82573,		WMP_F_COPPER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
    969 	  "Intel i82573L Gigabit Ethernet",
    970 	  WM_T_82573,		WMP_F_COPPER },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
    973 	  "Intel i82574L",
    974 	  WM_T_82574,		WMP_F_COPPER },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
    977 	  "Intel i82574L",
    978 	  WM_T_82574,		WMP_F_COPPER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
    981 	  "Intel i82583V",
    982 	  WM_T_82583,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
    985 	  "i80003 dual 1000baseT Ethernet",
    986 	  WM_T_80003,		WMP_F_COPPER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
    989 	  "i80003 dual 1000baseX Ethernet",
    990 	  WM_T_80003,		WMP_F_COPPER },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
    993 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
    994 	  WM_T_80003,		WMP_F_SERDES },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
    997 	  "Intel i80003 1000baseT Ethernet",
    998 	  WM_T_80003,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1001 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1002 	  WM_T_80003,		WMP_F_SERDES },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1005 	  "Intel i82801H (M_AMT) LAN Controller",
   1006 	  WM_T_ICH8,		WMP_F_COPPER },
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1008 	  "Intel i82801H (AMT) LAN Controller",
   1009 	  WM_T_ICH8,		WMP_F_COPPER },
   1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1011 	  "Intel i82801H LAN Controller",
   1012 	  WM_T_ICH8,		WMP_F_COPPER },
   1013 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1014 	  "Intel i82801H (IFE) LAN Controller",
   1015 	  WM_T_ICH8,		WMP_F_COPPER },
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1017 	  "Intel i82801H (M) LAN Controller",
   1018 	  WM_T_ICH8,		WMP_F_COPPER },
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1020 	  "Intel i82801H IFE (GT) LAN Controller",
   1021 	  WM_T_ICH8,		WMP_F_COPPER },
   1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1023 	  "Intel i82801H IFE (G) LAN Controller",
   1024 	  WM_T_ICH8,		WMP_F_COPPER },
   1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1026 	  "82801I (AMT) LAN Controller",
   1027 	  WM_T_ICH9,		WMP_F_COPPER },
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1029 	  "82801I LAN Controller",
   1030 	  WM_T_ICH9,		WMP_F_COPPER },
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1032 	  "82801I (G) LAN Controller",
   1033 	  WM_T_ICH9,		WMP_F_COPPER },
   1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1035 	  "82801I (GT) LAN Controller",
   1036 	  WM_T_ICH9,		WMP_F_COPPER },
   1037 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1038 	  "82801I (C) LAN Controller",
   1039 	  WM_T_ICH9,		WMP_F_COPPER },
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1041 	  "82801I mobile LAN Controller",
   1042 	  WM_T_ICH9,		WMP_F_COPPER },
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1044 	  "82801I mobile (V) LAN Controller",
   1045 	  WM_T_ICH9,		WMP_F_COPPER },
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1047 	  "82801I mobile (AMT) LAN Controller",
   1048 	  WM_T_ICH9,		WMP_F_COPPER },
   1049 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1050 	  "82567LM-4 LAN Controller",
   1051 	  WM_T_ICH9,		WMP_F_COPPER },
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1053 	  "82567V-3 LAN Controller",
   1054 	  WM_T_ICH9,		WMP_F_COPPER },
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1056 	  "82567LM-2 LAN Controller",
   1057 	  WM_T_ICH10,		WMP_F_COPPER },
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1059 	  "82567LF-2 LAN Controller",
   1060 	  WM_T_ICH10,		WMP_F_COPPER },
   1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1062 	  "82567LM-3 LAN Controller",
   1063 	  WM_T_ICH10,		WMP_F_COPPER },
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1065 	  "82567LF-3 LAN Controller",
   1066 	  WM_T_ICH10,		WMP_F_COPPER },
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1068 	  "82567V-2 LAN Controller",
   1069 	  WM_T_ICH10,		WMP_F_COPPER },
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1071 	  "82567V-3? LAN Controller",
   1072 	  WM_T_ICH10,		WMP_F_COPPER },
   1073 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1074 	  "HANKSVILLE LAN Controller",
   1075 	  WM_T_ICH10,		WMP_F_COPPER },
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1077 	  "PCH LAN (82577LM) Controller",
   1078 	  WM_T_PCH,		WMP_F_COPPER },
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1080 	  "PCH LAN (82577LC) Controller",
   1081 	  WM_T_PCH,		WMP_F_COPPER },
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1083 	  "PCH LAN (82578DM) Controller",
   1084 	  WM_T_PCH,		WMP_F_COPPER },
   1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1086 	  "PCH LAN (82578DC) Controller",
   1087 	  WM_T_PCH,		WMP_F_COPPER },
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1089 	  "PCH2 LAN (82579LM) Controller",
   1090 	  WM_T_PCH2,		WMP_F_COPPER },
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1092 	  "PCH2 LAN (82579V) Controller",
   1093 	  WM_T_PCH2,		WMP_F_COPPER },
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1095 	  "82575EB dual-1000baseT Ethernet",
   1096 	  WM_T_82575,		WMP_F_COPPER },
   1097 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1098 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1099 	  WM_T_82575,		WMP_F_SERDES },
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1101 	  "82575GB quad-1000baseT Ethernet",
   1102 	  WM_T_82575,		WMP_F_COPPER },
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1104 	  "82575GB quad-1000baseT Ethernet (PM)",
   1105 	  WM_T_82575,		WMP_F_COPPER },
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1107 	  "82576 1000BaseT Ethernet",
   1108 	  WM_T_82576,		WMP_F_COPPER },
   1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1110 	  "82576 1000BaseX Ethernet",
   1111 	  WM_T_82576,		WMP_F_FIBER },
   1112 
   1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1114 	  "82576 gigabit Ethernet (SERDES)",
   1115 	  WM_T_82576,		WMP_F_SERDES },
   1116 
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1118 	  "82576 quad-1000BaseT Ethernet",
   1119 	  WM_T_82576,		WMP_F_COPPER },
   1120 
   1121 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1122 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1123 	  WM_T_82576,		WMP_F_COPPER },
   1124 
   1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1126 	  "82576 gigabit Ethernet",
   1127 	  WM_T_82576,		WMP_F_COPPER },
   1128 
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1130 	  "82576 gigabit Ethernet (SERDES)",
   1131 	  WM_T_82576,		WMP_F_SERDES },
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1133 	  "82576 quad-gigabit Ethernet (SERDES)",
   1134 	  WM_T_82576,		WMP_F_SERDES },
   1135 
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1137 	  "82580 1000BaseT Ethernet",
   1138 	  WM_T_82580,		WMP_F_COPPER },
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1140 	  "82580 1000BaseX Ethernet",
   1141 	  WM_T_82580,		WMP_F_FIBER },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1144 	  "82580 1000BaseT Ethernet (SERDES)",
   1145 	  WM_T_82580,		WMP_F_SERDES },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1148 	  "82580 gigabit Ethernet (SGMII)",
   1149 	  WM_T_82580,		WMP_F_COPPER },
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1151 	  "82580 dual-1000BaseT Ethernet",
   1152 	  WM_T_82580,		WMP_F_COPPER },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1155 	  "82580 quad-1000BaseX Ethernet",
   1156 	  WM_T_82580,		WMP_F_FIBER },
   1157 
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1159 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1160 	  WM_T_82580,		WMP_F_COPPER },
   1161 
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1163 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1164 	  WM_T_82580,		WMP_F_SERDES },
   1165 
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1167 	  "DH89XXCC 1000BASE-KX Ethernet",
   1168 	  WM_T_82580,		WMP_F_SERDES },
   1169 
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1171 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1172 	  WM_T_82580,		WMP_F_SERDES },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1175 	  "I350 Gigabit Network Connection",
   1176 	  WM_T_I350,		WMP_F_COPPER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1179 	  "I350 Gigabit Fiber Network Connection",
   1180 	  WM_T_I350,		WMP_F_FIBER },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1183 	  "I350 Gigabit Backplane Connection",
   1184 	  WM_T_I350,		WMP_F_SERDES },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1187 	  "I350 Quad Port Gigabit Ethernet",
   1188 	  WM_T_I350,		WMP_F_SERDES },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1191 	  "I350 Gigabit Connection",
   1192 	  WM_T_I350,		WMP_F_COPPER },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1195 	  "I354 Gigabit Ethernet (KX)",
   1196 	  WM_T_I354,		WMP_F_SERDES },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1199 	  "I354 Gigabit Ethernet (SGMII)",
   1200 	  WM_T_I354,		WMP_F_COPPER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1203 	  "I354 Gigabit Ethernet (2.5G)",
   1204 	  WM_T_I354,		WMP_F_COPPER },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1207 	  "I210-T1 Ethernet Server Adapter",
   1208 	  WM_T_I210,		WMP_F_COPPER },
   1209 
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1211 	  "I210 Ethernet (Copper OEM)",
   1212 	  WM_T_I210,		WMP_F_COPPER },
   1213 
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1215 	  "I210 Ethernet (Copper IT)",
   1216 	  WM_T_I210,		WMP_F_COPPER },
   1217 
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
    1219 	  "I210 Ethernet (FLASH-less)",
   1220 	  WM_T_I210,		WMP_F_COPPER },
   1221 
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1223 	  "I210 Gigabit Ethernet (Fiber)",
   1224 	  WM_T_I210,		WMP_F_FIBER },
   1225 
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1227 	  "I210 Gigabit Ethernet (SERDES)",
   1228 	  WM_T_I210,		WMP_F_SERDES },
   1229 
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
    1231 	  "I210 Gigabit Ethernet (FLASH-less)",
   1232 	  WM_T_I210,		WMP_F_SERDES },
   1233 
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1235 	  "I210 Gigabit Ethernet (SGMII)",
   1236 	  WM_T_I210,		WMP_F_COPPER },
   1237 
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1239 	  "I211 Ethernet (COPPER)",
   1240 	  WM_T_I211,		WMP_F_COPPER },
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1242 	  "I217 V Ethernet Connection",
   1243 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1245 	  "I217 LM Ethernet Connection",
   1246 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1248 	  "I218 V Ethernet Connection",
   1249 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1251 	  "I218 V Ethernet Connection",
   1252 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1254 	  "I218 V Ethernet Connection",
   1255 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1257 	  "I218 LM Ethernet Connection",
   1258 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1260 	  "I218 LM Ethernet Connection",
   1261 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1263 	  "I218 LM Ethernet Connection",
   1264 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1265 	{ 0,			0,
   1266 	  NULL,
   1267 	  0,			0 },
   1268 };
   1269 
   1270 #ifdef WM_EVENT_COUNTERS
   1271 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1272 #endif /* WM_EVENT_COUNTERS */
   1273 
   1274 
   1275 /*
   1276  * Register read/write functions.
   1277  * Other than CSR_{READ|WRITE}().
   1278  */
   1279 
   1280 #if 0 /* Not currently used */
   1281 static inline uint32_t
   1282 wm_io_read(struct wm_softc *sc, int reg)
   1283 {
   1284 
   1285 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1286 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1287 }
   1288 #endif
   1289 
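         /*
          * The I/O BAR is an indirect window onto the CSRs: a write to offset
          * 0 latches the target register address, and offset 4 then accesses
          * that register's data (wm_io_read() above uses the same pair).
          */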
   1290 static inline void
   1291 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1292 {
   1293 
   1294 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1295 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1296 }
   1297 
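         /*
          * Write an 8-bit value to one of the serialized control registers
          * (e.g. SCTL) on 82575-class chips: the data and target offset are
          * packed into a single CSR write, and the hardware sets a ready bit
          * once the transaction completes; we poll for it below.
          */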
   1298 static inline void
   1299 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1300     uint32_t data)
   1301 {
   1302 	uint32_t regval;
   1303 	int i;
   1304 
   1305 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1306 
   1307 	CSR_WRITE(sc, reg, regval);
   1308 
   1309 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1310 		delay(5);
   1311 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1312 			break;
   1313 	}
   1314 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1315 		aprint_error("%s: WARNING:"
   1316 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1317 		    device_xname(sc->sc_dev), reg);
   1318 	}
   1319 }
   1320 
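         /*
          * Store a DMA address into the low/high 32-bit halves of a
          * descriptor address field, in little-endian byte order; the high
          * half is only non-zero when bus addresses are 64 bits wide.
          */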
   1321 static inline void
   1322 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1323 {
   1324 	wa->wa_low = htole32(v & 0xffffffffU);
   1325 	if (sizeof(bus_addr_t) == 8)
   1326 		wa->wa_high = htole32((uint64_t) v >> 32);
   1327 	else
   1328 		wa->wa_high = 0;
   1329 }
   1330 
   1331 /*
   1332  * Device driver interface functions and commonly used functions.
   1333  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1334  */
   1335 
   1336 /* Lookup supported device table */
   1337 static const struct wm_product *
   1338 wm_lookup(const struct pci_attach_args *pa)
   1339 {
   1340 	const struct wm_product *wmp;
   1341 
   1342 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1343 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1344 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1345 			return wmp;
   1346 	}
   1347 	return NULL;
   1348 }
   1349 
   1350 /* The match function (ca_match) */
   1351 static int
   1352 wm_match(device_t parent, cfdata_t cf, void *aux)
   1353 {
   1354 	struct pci_attach_args *pa = aux;
   1355 
   1356 	if (wm_lookup(pa) != NULL)
   1357 		return 1;
   1358 
   1359 	return 0;
   1360 }
   1361 
   1362 /* The attach function (ca_attach) */
   1363 static void
   1364 wm_attach(device_t parent, device_t self, void *aux)
   1365 {
   1366 	struct wm_softc *sc = device_private(self);
   1367 	struct pci_attach_args *pa = aux;
   1368 	prop_dictionary_t dict;
   1369 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1370 	pci_chipset_tag_t pc = pa->pa_pc;
   1371 	pci_intr_handle_t ih;
   1372 	const char *intrstr = NULL;
   1373 	const char *eetype, *xname;
   1374 	bus_space_tag_t memt;
   1375 	bus_space_handle_t memh;
   1376 	bus_size_t memsize;
   1377 	int memh_valid;
   1378 	int i, error;
   1379 	const struct wm_product *wmp;
   1380 	prop_data_t ea;
   1381 	prop_number_t pn;
   1382 	uint8_t enaddr[ETHER_ADDR_LEN];
   1383 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1384 	pcireg_t preg, memtype;
   1385 	uint16_t eeprom_data, apme_mask;
   1386 	bool force_clear_smbi;
   1387 	uint32_t link_mode;
   1388 	uint32_t reg;
   1389 	char intrbuf[PCI_INTRSTR_LEN];
   1390 
   1391 	sc->sc_dev = self;
   1392 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1393 	sc->sc_stopping = false;
   1394 
   1395 	wmp = wm_lookup(pa);
   1396 #ifdef DIAGNOSTIC
   1397 	if (wmp == NULL) {
   1398 		printf("\n");
   1399 		panic("wm_attach: impossible");
   1400 	}
   1401 #endif
   1402 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1403 
   1404 	sc->sc_pc = pa->pa_pc;
   1405 	sc->sc_pcitag = pa->pa_tag;
   1406 
   1407 	if (pci_dma64_available(pa))
   1408 		sc->sc_dmat = pa->pa_dmat64;
   1409 	else
   1410 		sc->sc_dmat = pa->pa_dmat;
   1411 
   1412 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1413 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1414 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1415 
   1416 	sc->sc_type = wmp->wmp_type;
   1417 	if (sc->sc_type < WM_T_82543) {
   1418 		if (sc->sc_rev < 2) {
   1419 			aprint_error_dev(sc->sc_dev,
   1420 			    "i82542 must be at least rev. 2\n");
   1421 			return;
   1422 		}
   1423 		if (sc->sc_rev < 3)
   1424 			sc->sc_type = WM_T_82542_2_0;
   1425 	}
   1426 
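         	/*
         	 * The 82575 and newer families use the advanced ("new
         	 * queue") Tx/Rx descriptor formats.
         	 */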
   1427 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1428 	    || (sc->sc_type == WM_T_82580)
   1429 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1430 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1431 		sc->sc_flags |= WM_F_NEWQUEUE;
   1432 
   1433 	/* Set device properties (mactype) */
   1434 	dict = device_properties(sc->sc_dev);
   1435 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1436 
   1437 	/*
    1438 	 * Map the device.  All devices support memory-mapped access,
   1439 	 * and it is really required for normal operation.
   1440 	 */
   1441 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1442 	switch (memtype) {
   1443 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1444 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1445 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1446 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1447 		break;
   1448 	default:
   1449 		memh_valid = 0;
   1450 		break;
   1451 	}
   1452 
   1453 	if (memh_valid) {
   1454 		sc->sc_st = memt;
   1455 		sc->sc_sh = memh;
   1456 		sc->sc_ss = memsize;
   1457 	} else {
   1458 		aprint_error_dev(sc->sc_dev,
   1459 		    "unable to map device registers\n");
   1460 		return;
   1461 	}
   1462 
   1463 	/*
   1464 	 * In addition, i82544 and later support I/O mapped indirect
   1465 	 * register access.  It is not desirable (nor supported in
   1466 	 * this driver) to use it for normal operation, though it is
   1467 	 * required to work around bugs in some chip versions.
   1468 	 */
   1469 	if (sc->sc_type >= WM_T_82544) {
   1470 		/* First we have to find the I/O BAR. */
   1471 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1472 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1473 			if (memtype == PCI_MAPREG_TYPE_IO)
   1474 				break;
   1475 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1476 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1477 				i += 4;	/* skip high bits, too */
   1478 		}
   1479 		if (i < PCI_MAPREG_END) {
   1480 			/*
    1481 			 * We found PCI_MAPREG_TYPE_IO.  Note that the
    1482 			 * 82580 (and newer?) chips have no I/O BAR; that
    1483 			 * is not a problem, since those chips also lack
    1484 			 * the bug that requires it.
    1485 			 *
    1486 			 * The i8254x apparently doesn't respond when the
    1487 			 * I/O BAR is 0, which looks somewhat like it
    1488 			 * hasn't been configured.
   1489 			 */
   1490 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1491 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1492 				aprint_error_dev(sc->sc_dev,
   1493 				    "WARNING: I/O BAR at zero.\n");
   1494 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1495 					0, &sc->sc_iot, &sc->sc_ioh,
   1496 					NULL, &sc->sc_ios) == 0) {
   1497 				sc->sc_flags |= WM_F_IOH_VALID;
   1498 			} else {
   1499 				aprint_error_dev(sc->sc_dev,
   1500 				    "WARNING: unable to map I/O space\n");
   1501 			}
   1502 		}
   1503 
   1504 	}
   1505 
   1506 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1507 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1508 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1509 	if (sc->sc_type < WM_T_82542_2_1)
   1510 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1511 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1512 
   1513 	/* power up chip */
   1514 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1515 	    NULL)) && error != EOPNOTSUPP) {
   1516 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1517 		return;
   1518 	}
   1519 
   1520 	/*
   1521 	 * Map and establish our interrupt.
   1522 	 */
   1523 	if (pci_intr_map(pa, &ih)) {
   1524 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
   1525 		return;
   1526 	}
   1527 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
   1528 #ifdef WM_MPSAFE
   1529 	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
   1530 #endif
   1531 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
   1532 	if (sc->sc_ih == NULL) {
   1533 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
   1534 		if (intrstr != NULL)
   1535 			aprint_error(" at %s", intrstr);
   1536 		aprint_error("\n");
   1537 		return;
   1538 	}
   1539 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   1540 
   1541 	/*
   1542 	 * Check the function ID (unit number of the chip).
   1543 	 */
   1544 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1545 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1546 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1547 	    || (sc->sc_type == WM_T_82580)
   1548 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1549 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1550 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1551 	else
   1552 		sc->sc_funcid = 0;
   1553 
   1554 	/*
   1555 	 * Determine a few things about the bus we're connected to.
   1556 	 */
   1557 	if (sc->sc_type < WM_T_82543) {
   1558 		/* We don't really know the bus characteristics here. */
   1559 		sc->sc_bus_speed = 33;
   1560 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1561 		/*
    1562 		 * CSA (Communication Streaming Architecture) is about as fast
    1563 		 * as a 32-bit 66MHz PCI bus.
   1564 		 */
   1565 		sc->sc_flags |= WM_F_CSA;
   1566 		sc->sc_bus_speed = 66;
   1567 		aprint_verbose_dev(sc->sc_dev,
   1568 		    "Communication Streaming Architecture\n");
   1569 		if (sc->sc_type == WM_T_82547) {
   1570 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1571 			callout_setfunc(&sc->sc_txfifo_ch,
   1572 					wm_82547_txfifo_stall, sc);
   1573 			aprint_verbose_dev(sc->sc_dev,
   1574 			    "using 82547 Tx FIFO stall work-around\n");
   1575 		}
   1576 	} else if (sc->sc_type >= WM_T_82571) {
   1577 		sc->sc_flags |= WM_F_PCIE;
   1578 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1579 		    && (sc->sc_type != WM_T_ICH10)
   1580 		    && (sc->sc_type != WM_T_PCH)
   1581 		    && (sc->sc_type != WM_T_PCH2)
   1582 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1583 			/* ICH* and PCH* have no PCIe capability registers */
   1584 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1585 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1586 				NULL) == 0)
   1587 				aprint_error_dev(sc->sc_dev,
   1588 				    "unable to find PCIe capability\n");
   1589 		}
   1590 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1591 	} else {
   1592 		reg = CSR_READ(sc, WMREG_STATUS);
   1593 		if (reg & STATUS_BUS64)
   1594 			sc->sc_flags |= WM_F_BUS64;
   1595 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1596 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1597 
   1598 			sc->sc_flags |= WM_F_PCIX;
   1599 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1600 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1601 				aprint_error_dev(sc->sc_dev,
   1602 				    "unable to find PCIX capability\n");
   1603 			else if (sc->sc_type != WM_T_82545_3 &&
   1604 				 sc->sc_type != WM_T_82546_3) {
   1605 				/*
   1606 				 * Work around a problem caused by the BIOS
   1607 				 * setting the max memory read byte count
   1608 				 * incorrectly.
   1609 				 */
   1610 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1611 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1612 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1613 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1614 
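         				/*
         				 * Both fields encode the byte count
         				 * as 512 << n, so a commanded value
         				 * larger than the advertised maximum
         				 * must be clamped down to it.
         				 */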
   1615 				bytecnt =
   1616 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1617 				    PCIX_CMD_BYTECNT_SHIFT;
   1618 				maxb =
   1619 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1620 				    PCIX_STATUS_MAXB_SHIFT;
   1621 				if (bytecnt > maxb) {
   1622 					aprint_verbose_dev(sc->sc_dev,
   1623 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1624 					    512 << bytecnt, 512 << maxb);
   1625 					pcix_cmd = (pcix_cmd &
   1626 					    ~PCIX_CMD_BYTECNT_MASK) |
   1627 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1628 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1629 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1630 					    pcix_cmd);
   1631 				}
   1632 			}
   1633 		}
   1634 		/*
   1635 		 * The quad port adapter is special; it has a PCIX-PCIX
   1636 		 * bridge on the board, and can run the secondary bus at
   1637 		 * a higher speed.
   1638 		 */
   1639 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1640 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1641 								      : 66;
   1642 		} else if (sc->sc_flags & WM_F_PCIX) {
   1643 			switch (reg & STATUS_PCIXSPD_MASK) {
   1644 			case STATUS_PCIXSPD_50_66:
   1645 				sc->sc_bus_speed = 66;
   1646 				break;
   1647 			case STATUS_PCIXSPD_66_100:
   1648 				sc->sc_bus_speed = 100;
   1649 				break;
   1650 			case STATUS_PCIXSPD_100_133:
   1651 				sc->sc_bus_speed = 133;
   1652 				break;
   1653 			default:
   1654 				aprint_error_dev(sc->sc_dev,
   1655 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1656 				    reg & STATUS_PCIXSPD_MASK);
   1657 				sc->sc_bus_speed = 66;
   1658 				break;
   1659 			}
   1660 		} else
   1661 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1662 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1663 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1664 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1665 	}
   1666 
   1667 	/*
   1668 	 * Allocate the control data structures, and create and load the
   1669 	 * DMA map for it.
   1670 	 *
   1671 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   1672 	 * memory.  So must Rx descriptors.  We simplify by allocating
   1673 	 * both sets within the same 4G segment.
   1674 	 */
   1675 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
   1676 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
   1677 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
   1678 	    sizeof(struct wm_control_data_82542) :
   1679 	    sizeof(struct wm_control_data_82544);
   1680 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
   1681 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
   1682 		    &sc->sc_cd_rseg, 0)) != 0) {
   1683 		aprint_error_dev(sc->sc_dev,
   1684 		    "unable to allocate control data, error = %d\n",
   1685 		    error);
   1686 		goto fail_0;
   1687 	}
   1688 
   1689 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
   1690 		    sc->sc_cd_rseg, sc->sc_cd_size,
   1691 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
   1692 		aprint_error_dev(sc->sc_dev,
   1693 		    "unable to map control data, error = %d\n", error);
   1694 		goto fail_1;
   1695 	}
   1696 
   1697 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
   1698 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
   1699 		aprint_error_dev(sc->sc_dev,
   1700 		    "unable to create control data DMA map, error = %d\n",
   1701 		    error);
   1702 		goto fail_2;
   1703 	}
   1704 
   1705 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
   1706 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
   1707 		aprint_error_dev(sc->sc_dev,
   1708 		    "unable to load control data DMA map, error = %d\n",
   1709 		    error);
   1710 		goto fail_3;
   1711 	}
   1712 
   1713 	/* Create the transmit buffer DMA maps. */
   1714 	WM_TXQUEUELEN(sc) =
   1715 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   1716 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   1717 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1718 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   1719 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   1720 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
   1721 			aprint_error_dev(sc->sc_dev,
   1722 			    "unable to create Tx DMA map %d, error = %d\n",
   1723 			    i, error);
   1724 			goto fail_4;
   1725 		}
   1726 	}
   1727 
   1728 	/* Create the receive buffer DMA maps. */
   1729 	for (i = 0; i < WM_NRXDESC; i++) {
   1730 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   1731 			    MCLBYTES, 0, 0,
   1732 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
   1733 			aprint_error_dev(sc->sc_dev,
   1734 			    "unable to create Rx DMA map %d error = %d\n",
   1735 			    i, error);
   1736 			goto fail_5;
   1737 		}
   1738 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
   1739 	}
   1740 
   1741 	/* clear interesting stat counters */
   1742 	CSR_READ(sc, WMREG_COLC);
   1743 	CSR_READ(sc, WMREG_RXERRC);
   1744 
   1745 	/* get PHY control from SMBus to PCIe */
   1746 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1747 	    || (sc->sc_type == WM_T_PCH_LPT))
   1748 		wm_smbustopci(sc);
   1749 
   1750 	/* Reset the chip to a known state. */
   1751 	wm_reset(sc);
   1752 
   1753 	/* Get some information about the EEPROM. */
   1754 	switch (sc->sc_type) {
   1755 	case WM_T_82542_2_0:
   1756 	case WM_T_82542_2_1:
   1757 	case WM_T_82543:
   1758 	case WM_T_82544:
   1759 		/* Microwire */
   1760 		sc->sc_nvm_wordsize = 64;
   1761 		sc->sc_nvm_addrbits = 6;
   1762 		break;
   1763 	case WM_T_82540:
   1764 	case WM_T_82545:
   1765 	case WM_T_82545_3:
   1766 	case WM_T_82546:
   1767 	case WM_T_82546_3:
   1768 		/* Microwire */
   1769 		reg = CSR_READ(sc, WMREG_EECD);
   1770 		if (reg & EECD_EE_SIZE) {
   1771 			sc->sc_nvm_wordsize = 256;
   1772 			sc->sc_nvm_addrbits = 8;
   1773 		} else {
   1774 			sc->sc_nvm_wordsize = 64;
   1775 			sc->sc_nvm_addrbits = 6;
   1776 		}
   1777 		sc->sc_flags |= WM_F_LOCK_EECD;
   1778 		break;
   1779 	case WM_T_82541:
   1780 	case WM_T_82541_2:
   1781 	case WM_T_82547:
   1782 	case WM_T_82547_2:
   1783 		sc->sc_flags |= WM_F_LOCK_EECD;
   1784 		reg = CSR_READ(sc, WMREG_EECD);
   1785 		if (reg & EECD_EE_TYPE) {
   1786 			/* SPI */
   1787 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1788 			wm_nvm_set_addrbits_size_eecd(sc);
   1789 		} else {
   1790 			/* Microwire */
   1791 			if ((reg & EECD_EE_ABITS) != 0) {
   1792 				sc->sc_nvm_wordsize = 256;
   1793 				sc->sc_nvm_addrbits = 8;
   1794 			} else {
   1795 				sc->sc_nvm_wordsize = 64;
   1796 				sc->sc_nvm_addrbits = 6;
   1797 			}
   1798 		}
   1799 		break;
   1800 	case WM_T_82571:
   1801 	case WM_T_82572:
   1802 		/* SPI */
   1803 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1804 		wm_nvm_set_addrbits_size_eecd(sc);
   1805 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1806 		break;
   1807 	case WM_T_82573:
   1808 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1809 		/* FALLTHROUGH */
   1810 	case WM_T_82574:
   1811 	case WM_T_82583:
   1812 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1813 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1814 			sc->sc_nvm_wordsize = 2048;
   1815 		} else {
   1816 			/* SPI */
   1817 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1818 			wm_nvm_set_addrbits_size_eecd(sc);
   1819 		}
   1820 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1821 		break;
   1822 	case WM_T_82575:
   1823 	case WM_T_82576:
   1824 	case WM_T_82580:
   1825 	case WM_T_I350:
   1826 	case WM_T_I354:
   1827 	case WM_T_80003:
   1828 		/* SPI */
   1829 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1830 		wm_nvm_set_addrbits_size_eecd(sc);
   1831 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1832 		    | WM_F_LOCK_SWSM;
   1833 		break;
   1834 	case WM_T_ICH8:
   1835 	case WM_T_ICH9:
   1836 	case WM_T_ICH10:
   1837 	case WM_T_PCH:
   1838 	case WM_T_PCH2:
   1839 	case WM_T_PCH_LPT:
   1840 		/* FLASH */
   1841 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1842 		sc->sc_nvm_wordsize = 2048;
   1843 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1844 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1845 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
   1846 			aprint_error_dev(sc->sc_dev,
   1847 			    "can't map FLASH registers\n");
   1848 			goto fail_5;
   1849 		}
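         		/*
         		 * GFPREG describes the flash region in sector units:
         		 * the base in its low half and the limit in its high
         		 * half (both masked with ICH_GFPREG_BASE_MASK).  The
         		 * code below converts that to bytes, halves it because
         		 * the region holds two NVM banks, and expresses the
         		 * bank size in 16-bit words for the NVM read code.
         		 */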
   1850 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1851 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1852 						ICH_FLASH_SECTOR_SIZE;
   1853 		sc->sc_ich8_flash_bank_size =
   1854 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1855 		sc->sc_ich8_flash_bank_size -=
   1856 		    (reg & ICH_GFPREG_BASE_MASK);
   1857 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1858 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1859 		break;
   1860 	case WM_T_I210:
   1861 	case WM_T_I211:
   1862 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1863 			wm_nvm_set_addrbits_size_eecd(sc);
   1864 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1865 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1866 		} else {
   1867 			sc->sc_nvm_wordsize = INVM_SIZE;
   1868 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1869 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1870 		}
   1871 		break;
   1872 	default:
   1873 		break;
   1874 	}
   1875 
   1876 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1877 	switch (sc->sc_type) {
   1878 	case WM_T_82571:
   1879 	case WM_T_82572:
   1880 		reg = CSR_READ(sc, WMREG_SWSM2);
   1881 		if ((reg & SWSM2_LOCK) == 0) {
   1882 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1883 			force_clear_smbi = true;
   1884 		} else
   1885 			force_clear_smbi = false;
   1886 		break;
   1887 	case WM_T_82573:
   1888 	case WM_T_82574:
   1889 	case WM_T_82583:
   1890 		force_clear_smbi = true;
   1891 		break;
   1892 	default:
   1893 		force_clear_smbi = false;
   1894 		break;
   1895 	}
   1896 	if (force_clear_smbi) {
   1897 		reg = CSR_READ(sc, WMREG_SWSM);
   1898 		if ((reg & SWSM_SMBI) != 0)
   1899 			aprint_error_dev(sc->sc_dev,
   1900 			    "Please update the Bootagent\n");
   1901 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1902 	}
   1903 
   1904 	/*
    1905 	 * Defer printing the EEPROM type until after verifying the checksum.
   1906 	 * This allows the EEPROM type to be printed correctly in the case
   1907 	 * that no EEPROM is attached.
   1908 	 */
   1909 	/*
   1910 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1911 	 * this for later, so we can fail future reads from the EEPROM.
   1912 	 */
   1913 	if (wm_nvm_validate_checksum(sc)) {
   1914 		/*
    1915 		 * Validate again, because some PCI-e parts fail the
    1916 		 * first check due to the link being in a sleep state.
   1917 		 */
   1918 		if (wm_nvm_validate_checksum(sc))
   1919 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1920 	}
   1921 
   1922 	/* Set device properties (macflags) */
   1923 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1924 
   1925 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1926 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   1927 	else {
   1928 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   1929 		    sc->sc_nvm_wordsize);
   1930 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   1931 			aprint_verbose("iNVM");
   1932 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   1933 			aprint_verbose("FLASH(HW)");
   1934 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   1935 			aprint_verbose("FLASH");
   1936 		else {
   1937 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   1938 				eetype = "SPI";
   1939 			else
   1940 				eetype = "MicroWire";
   1941 			aprint_verbose("(%d address bits) %s EEPROM",
   1942 			    sc->sc_nvm_addrbits, eetype);
   1943 		}
   1944 	}
   1945 	wm_nvm_version(sc);
   1946 	aprint_verbose("\n");
   1947 
   1948 	/* Check for I21[01] PLL workaround */
   1949 	if (sc->sc_type == WM_T_I210)
   1950 		sc->sc_flags |= WM_F_PLL_WA_I210;
   1951 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
    1952 		/* NVM images 3.25 and newer contain the workaround */
   1953 		if ((sc->sc_nvm_ver_major > 3)
   1954 		    || ((sc->sc_nvm_ver_major == 3)
   1955 			&& (sc->sc_nvm_ver_minor >= 25)))
    1956 			sc->sc_flags &= ~WM_F_PLL_WA_I210;
   1957 		else {
   1958 			aprint_verbose_dev(sc->sc_dev,
   1959 			    "ROM image version %d.%d is older than 3.25\n",
   1960 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   1961 			sc->sc_flags |= WM_F_PLL_WA_I210;
   1962 		}
   1963 	}
   1964 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   1965 		wm_pll_workaround_i210(sc);
   1966 
   1967 	switch (sc->sc_type) {
   1968 	case WM_T_82571:
   1969 	case WM_T_82572:
   1970 	case WM_T_82573:
   1971 	case WM_T_82574:
   1972 	case WM_T_82583:
   1973 	case WM_T_80003:
   1974 	case WM_T_ICH8:
   1975 	case WM_T_ICH9:
   1976 	case WM_T_ICH10:
   1977 	case WM_T_PCH:
   1978 	case WM_T_PCH2:
   1979 	case WM_T_PCH_LPT:
   1980 		if (wm_check_mng_mode(sc) != 0)
   1981 			wm_get_hw_control(sc);
   1982 		break;
   1983 	default:
   1984 		break;
   1985 	}
   1986 	wm_get_wakeup(sc);
   1987 	/*
   1988 	 * Read the Ethernet address from the EEPROM, if not first found
   1989 	 * in device properties.
   1990 	 */
   1991 	ea = prop_dictionary_get(dict, "mac-address");
   1992 	if (ea != NULL) {
   1993 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   1994 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   1995 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   1996 	} else {
   1997 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   1998 			aprint_error_dev(sc->sc_dev,
   1999 			    "unable to read Ethernet address\n");
   2000 			goto fail_5;
   2001 		}
   2002 	}
   2003 
   2004 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2005 	    ether_sprintf(enaddr));
   2006 
   2007 	/*
   2008 	 * Read the config info from the EEPROM, and set up various
   2009 	 * bits in the control registers based on their contents.
   2010 	 */
   2011 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2012 	if (pn != NULL) {
   2013 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2014 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2015 	} else {
   2016 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2017 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2018 			goto fail_5;
   2019 		}
   2020 	}
   2021 
   2022 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2023 	if (pn != NULL) {
   2024 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2025 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2026 	} else {
   2027 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2028 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2029 			goto fail_5;
   2030 		}
   2031 	}
   2032 
   2033 	/* check for WM_F_WOL */
   2034 	switch (sc->sc_type) {
   2035 	case WM_T_82542_2_0:
   2036 	case WM_T_82542_2_1:
   2037 	case WM_T_82543:
   2038 		/* dummy? */
   2039 		eeprom_data = 0;
   2040 		apme_mask = NVM_CFG3_APME;
   2041 		break;
   2042 	case WM_T_82544:
   2043 		apme_mask = NVM_CFG2_82544_APM_EN;
   2044 		eeprom_data = cfg2;
   2045 		break;
   2046 	case WM_T_82546:
   2047 	case WM_T_82546_3:
   2048 	case WM_T_82571:
   2049 	case WM_T_82572:
   2050 	case WM_T_82573:
   2051 	case WM_T_82574:
   2052 	case WM_T_82583:
   2053 	case WM_T_80003:
   2054 	default:
   2055 		apme_mask = NVM_CFG3_APME;
   2056 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2057 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2058 		break;
   2059 	case WM_T_82575:
   2060 	case WM_T_82576:
   2061 	case WM_T_82580:
   2062 	case WM_T_I350:
   2063 	case WM_T_I354: /* XXX ok? */
   2064 	case WM_T_ICH8:
   2065 	case WM_T_ICH9:
   2066 	case WM_T_ICH10:
   2067 	case WM_T_PCH:
   2068 	case WM_T_PCH2:
   2069 	case WM_T_PCH_LPT:
   2070 		/* XXX The funcid should be checked on some devices */
   2071 		apme_mask = WUC_APME;
   2072 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2073 		break;
   2074 	}
   2075 
    2076 	/* Check for the WM_F_WOL flag now that the EEPROM settings are known */
   2077 	if ((eeprom_data & apme_mask) != 0)
   2078 		sc->sc_flags |= WM_F_WOL;
   2079 #ifdef WM_DEBUG
   2080 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2081 		printf("WOL\n");
   2082 #endif
   2083 
   2084 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2085 		/* Check NVM for autonegotiation */
   2086 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2087 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2088 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2089 		}
   2090 	}
   2091 
   2092 	/*
    2093 	 * XXX need special handling for some multi-port cards
    2094 	 * to disable a particular port.
   2095 	 */
   2096 
   2097 	if (sc->sc_type >= WM_T_82544) {
   2098 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2099 		if (pn != NULL) {
   2100 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2101 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2102 		} else {
   2103 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2104 				aprint_error_dev(sc->sc_dev,
   2105 				    "unable to read SWDPIN\n");
   2106 				goto fail_5;
   2107 			}
   2108 		}
   2109 	}
   2110 
   2111 	if (cfg1 & NVM_CFG1_ILOS)
   2112 		sc->sc_ctrl |= CTRL_ILOS;
   2113 
   2114 	/*
   2115 	 * XXX
    2116 	 * This code isn't correct because pins 2 and 3 are located in
    2117 	 * different positions on newer chips.  Check all datasheets.
    2118 	 *
    2119 	 * Until this is resolved, apply it only up to the 82580 (inclusive).
   2120 	 */
   2121 	if (sc->sc_type <= WM_T_82580) {
   2122 		if (sc->sc_type >= WM_T_82544) {
   2123 			sc->sc_ctrl |=
   2124 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2125 			    CTRL_SWDPIO_SHIFT;
   2126 			sc->sc_ctrl |=
   2127 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2128 			    CTRL_SWDPINS_SHIFT;
   2129 		} else {
   2130 			sc->sc_ctrl |=
   2131 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2132 			    CTRL_SWDPIO_SHIFT;
   2133 		}
   2134 	}
   2135 
   2136 	/* XXX For other than 82580? */
   2137 	if (sc->sc_type == WM_T_82580) {
   2138 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2139 		printf("CFG3 = %08x\n", (uint32_t)nvmword);
   2140 		if (nvmword & __BIT(13)) {
   2141 			printf("SET ILOS\n");
   2142 			sc->sc_ctrl |= CTRL_ILOS;
   2143 		}
   2144 	}
   2145 
   2146 #if 0
   2147 	if (sc->sc_type >= WM_T_82544) {
   2148 		if (cfg1 & NVM_CFG1_IPS0)
   2149 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2150 		if (cfg1 & NVM_CFG1_IPS1)
   2151 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2152 		sc->sc_ctrl_ext |=
   2153 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2154 		    CTRL_EXT_SWDPIO_SHIFT;
   2155 		sc->sc_ctrl_ext |=
   2156 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2157 		    CTRL_EXT_SWDPINS_SHIFT;
   2158 	} else {
   2159 		sc->sc_ctrl_ext |=
   2160 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2161 		    CTRL_EXT_SWDPIO_SHIFT;
   2162 	}
   2163 #endif
   2164 
   2165 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2166 #if 0
   2167 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2168 #endif
   2169 
   2170 	/*
   2171 	 * Set up some register offsets that are different between
   2172 	 * the i82542 and the i82543 and later chips.
   2173 	 */
   2174 	if (sc->sc_type < WM_T_82543) {
   2175 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   2176 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   2177 	} else {
   2178 		sc->sc_rdt_reg = WMREG_RDT;
   2179 		sc->sc_tdt_reg = WMREG_TDT;
   2180 	}
   2181 
   2182 	if (sc->sc_type == WM_T_PCH) {
   2183 		uint16_t val;
   2184 
   2185 		/* Save the NVM K1 bit setting */
   2186 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2187 
   2188 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2189 			sc->sc_nvm_k1_enabled = 1;
   2190 		else
   2191 			sc->sc_nvm_k1_enabled = 0;
   2192 	}
   2193 
   2194 	/*
    2195 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
   2196 	 * media structures accordingly.
   2197 	 */
   2198 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2199 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2200 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2201 	    || sc->sc_type == WM_T_82573
   2202 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2203 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2204 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2205 	} else if (sc->sc_type < WM_T_82543 ||
   2206 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2207 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2208 			aprint_error_dev(sc->sc_dev,
   2209 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2210 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2211 		}
   2212 		wm_tbi_mediainit(sc);
   2213 	} else {
   2214 		switch (sc->sc_type) {
   2215 		case WM_T_82575:
   2216 		case WM_T_82576:
   2217 		case WM_T_82580:
   2218 		case WM_T_I350:
   2219 		case WM_T_I354:
   2220 		case WM_T_I210:
   2221 		case WM_T_I211:
   2222 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2223 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2224 			switch (link_mode) {
   2225 			case CTRL_EXT_LINK_MODE_1000KX:
   2226 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2227 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2228 				break;
   2229 			case CTRL_EXT_LINK_MODE_SGMII:
   2230 				if (wm_sgmii_uses_mdio(sc)) {
   2231 					aprint_verbose_dev(sc->sc_dev,
   2232 					    "SGMII(MDIO)\n");
   2233 					sc->sc_flags |= WM_F_SGMII;
   2234 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2235 					break;
   2236 				}
   2237 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2238 				/*FALLTHROUGH*/
   2239 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2240 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2241 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2242 					if (link_mode
   2243 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2244 						sc->sc_mediatype
   2245 						    = WM_MEDIATYPE_COPPER;
   2246 						sc->sc_flags |= WM_F_SGMII;
   2247 					} else {
   2248 						sc->sc_mediatype
   2249 						    = WM_MEDIATYPE_SERDES;
   2250 						aprint_verbose_dev(sc->sc_dev,
   2251 						    "SERDES\n");
   2252 					}
   2253 					break;
   2254 				}
   2255 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2256 					aprint_verbose_dev(sc->sc_dev,
   2257 					    "SERDES\n");
   2258 
   2259 				/* Change current link mode setting */
   2260 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2261 				switch (sc->sc_mediatype) {
   2262 				case WM_MEDIATYPE_COPPER:
   2263 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2264 					break;
   2265 				case WM_MEDIATYPE_SERDES:
   2266 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2267 					break;
   2268 				default:
   2269 					break;
   2270 				}
   2271 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2272 				break;
   2273 			case CTRL_EXT_LINK_MODE_GMII:
   2274 			default:
   2275 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2276 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2277 				break;
   2278 			}
   2279 
    2281 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2282 				reg |= CTRL_EXT_I2C_ENA;
    2283 			else
    2284 				reg &= ~CTRL_EXT_I2C_ENA;
   2285 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2286 
   2287 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2288 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2289 			else
   2290 				wm_tbi_mediainit(sc);
   2291 			break;
   2292 		default:
   2293 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2294 				aprint_error_dev(sc->sc_dev,
   2295 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2296 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2297 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2298 		}
   2299 	}
   2300 
   2301 	ifp = &sc->sc_ethercom.ec_if;
   2302 	xname = device_xname(sc->sc_dev);
   2303 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2304 	ifp->if_softc = sc;
   2305 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2306 	ifp->if_ioctl = wm_ioctl;
   2307 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2308 		ifp->if_start = wm_nq_start;
   2309 	else
   2310 		ifp->if_start = wm_start;
   2311 	ifp->if_watchdog = wm_watchdog;
   2312 	ifp->if_init = wm_init;
   2313 	ifp->if_stop = wm_stop;
   2314 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2315 	IFQ_SET_READY(&ifp->if_snd);
   2316 
   2317 	/* Check for jumbo frame */
   2318 	switch (sc->sc_type) {
   2319 	case WM_T_82573:
   2320 		/* XXX limited to 9234 if ASPM is disabled */
   2321 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2322 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2323 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2324 		break;
   2325 	case WM_T_82571:
   2326 	case WM_T_82572:
   2327 	case WM_T_82574:
   2328 	case WM_T_82575:
   2329 	case WM_T_82576:
   2330 	case WM_T_82580:
   2331 	case WM_T_I350:
   2332 	case WM_T_I354: /* XXXX ok? */
   2333 	case WM_T_I210:
   2334 	case WM_T_I211:
   2335 	case WM_T_80003:
   2336 	case WM_T_ICH9:
   2337 	case WM_T_ICH10:
   2338 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2339 	case WM_T_PCH_LPT:
   2340 		/* XXX limited to 9234 */
   2341 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2342 		break;
   2343 	case WM_T_PCH:
   2344 		/* XXX limited to 4096 */
   2345 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2346 		break;
   2347 	case WM_T_82542_2_0:
   2348 	case WM_T_82542_2_1:
   2349 	case WM_T_82583:
   2350 	case WM_T_ICH8:
   2351 		/* No support for jumbo frame */
   2352 		break;
   2353 	default:
   2354 		/* ETHER_MAX_LEN_JUMBO */
   2355 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2356 		break;
   2357 	}
   2358 
    2359 	/* If we're an i82543 or greater, we can support VLANs. */
   2360 	if (sc->sc_type >= WM_T_82543)
   2361 		sc->sc_ethercom.ec_capabilities |=
   2362 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2363 
   2364 	/*
    2365 	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
   2366 	 * on i82543 and later.
   2367 	 */
   2368 	if (sc->sc_type >= WM_T_82543) {
   2369 		ifp->if_capabilities |=
   2370 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2371 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2372 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2373 		    IFCAP_CSUM_TCPv6_Tx |
   2374 		    IFCAP_CSUM_UDPv6_Tx;
   2375 	}
   2376 
   2377 	/*
   2378 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2379 	 *
   2380 	 *	82541GI (8086:1076) ... no
   2381 	 *	82572EI (8086:10b9) ... yes
   2382 	 */
   2383 	if (sc->sc_type >= WM_T_82571) {
   2384 		ifp->if_capabilities |=
   2385 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2386 	}
   2387 
   2388 	/*
    2389 	 * If we're an i82544 or greater (except i82547), we can do
   2390 	 * TCP segmentation offload.
   2391 	 */
   2392 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2393 		ifp->if_capabilities |= IFCAP_TSOv4;
   2394 	}
   2395 
   2396 	if (sc->sc_type >= WM_T_82571) {
   2397 		ifp->if_capabilities |= IFCAP_TSOv6;
   2398 	}
   2399 
   2400 #ifdef WM_MPSAFE
   2401 	sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2402 	sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2403 #else
   2404 	sc->sc_tx_lock = NULL;
   2405 	sc->sc_rx_lock = NULL;
   2406 #endif
   2407 
   2408 	/* Attach the interface. */
   2409 	if_attach(ifp);
   2410 	ether_ifattach(ifp, enaddr);
   2411 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2412 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2413 			  RND_FLAG_DEFAULT);
   2414 
   2415 #ifdef WM_EVENT_COUNTERS
   2416 	/* Attach event counters. */
   2417 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2418 	    NULL, xname, "txsstall");
   2419 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2420 	    NULL, xname, "txdstall");
   2421 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2422 	    NULL, xname, "txfifo_stall");
   2423 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2424 	    NULL, xname, "txdw");
   2425 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2426 	    NULL, xname, "txqe");
   2427 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2428 	    NULL, xname, "rxintr");
   2429 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2430 	    NULL, xname, "linkintr");
   2431 
   2432 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2433 	    NULL, xname, "rxipsum");
   2434 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2435 	    NULL, xname, "rxtusum");
   2436 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2437 	    NULL, xname, "txipsum");
   2438 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2439 	    NULL, xname, "txtusum");
   2440 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2441 	    NULL, xname, "txtusum6");
   2442 
   2443 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2444 	    NULL, xname, "txtso");
   2445 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2446 	    NULL, xname, "txtso6");
   2447 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2448 	    NULL, xname, "txtsopain");
   2449 
   2450 	for (i = 0; i < WM_NTXSEGS; i++) {
   2451 		snprintf(wm_txseg_evcnt_names[i],
   2452 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2453 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2454 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2455 	}
   2456 
   2457 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2458 	    NULL, xname, "txdrop");
   2459 
   2460 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2461 	    NULL, xname, "tu");
   2462 
   2463 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2464 	    NULL, xname, "tx_xoff");
   2465 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2466 	    NULL, xname, "tx_xon");
   2467 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2468 	    NULL, xname, "rx_xoff");
   2469 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2470 	    NULL, xname, "rx_xon");
   2471 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2472 	    NULL, xname, "rx_macctl");
   2473 #endif /* WM_EVENT_COUNTERS */
   2474 
   2475 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2476 		pmf_class_network_register(self, ifp);
   2477 	else
   2478 		aprint_error_dev(self, "couldn't establish power handler\n");
   2479 
   2480 	sc->sc_flags |= WM_F_ATTACHED;
   2481 	return;
   2482 
   2483 	/*
   2484 	 * Free any resources we've allocated during the failed attach
   2485 	 * attempt.  Do this in reverse order and fall through.
   2486 	 */
   2487  fail_5:
   2488 	for (i = 0; i < WM_NRXDESC; i++) {
   2489 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2490 			bus_dmamap_destroy(sc->sc_dmat,
   2491 			    sc->sc_rxsoft[i].rxs_dmamap);
   2492 	}
   2493  fail_4:
   2494 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2495 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2496 			bus_dmamap_destroy(sc->sc_dmat,
   2497 			    sc->sc_txsoft[i].txs_dmamap);
   2498 	}
   2499 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2500  fail_3:
   2501 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2502  fail_2:
   2503 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2504 	    sc->sc_cd_size);
   2505  fail_1:
   2506 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2507  fail_0:
   2508 	return;
   2509 }
   2510 
   2511 /* The detach function (ca_detach) */
   2512 static int
   2513 wm_detach(device_t self, int flags __unused)
   2514 {
   2515 	struct wm_softc *sc = device_private(self);
   2516 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2517 	int i;
   2518 #ifndef WM_MPSAFE
   2519 	int s;
   2520 #endif
   2521 
   2522 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2523 		return 0;
   2524 
   2525 #ifndef WM_MPSAFE
   2526 	s = splnet();
   2527 #endif
   2528 	/* Stop the interface. Callouts are stopped in it. */
   2529 	wm_stop(ifp, 1);
   2530 
   2531 #ifndef WM_MPSAFE
   2532 	splx(s);
   2533 #endif
   2534 
   2535 	pmf_device_deregister(self);
   2536 
   2537 	/* Tell the firmware about the release */
   2538 	WM_BOTH_LOCK(sc);
   2539 	wm_release_manageability(sc);
   2540 	wm_release_hw_control(sc);
   2541 	WM_BOTH_UNLOCK(sc);
   2542 
   2543 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2544 
   2545 	/* Delete all remaining media. */
   2546 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2547 
   2548 	ether_ifdetach(ifp);
   2549 	if_detach(ifp);
    2550 
   2552 	/* Unload RX dmamaps and free mbufs */
   2553 	WM_RX_LOCK(sc);
   2554 	wm_rxdrain(sc);
   2555 	WM_RX_UNLOCK(sc);
   2556 	/* Must unlock here */
   2557 
   2558 	/* Free dmamap. It's the same as the end of the wm_attach() function */
   2559 	for (i = 0; i < WM_NRXDESC; i++) {
   2560 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2561 			bus_dmamap_destroy(sc->sc_dmat,
   2562 			    sc->sc_rxsoft[i].rxs_dmamap);
   2563 	}
   2564 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2565 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2566 			bus_dmamap_destroy(sc->sc_dmat,
   2567 			    sc->sc_txsoft[i].txs_dmamap);
   2568 	}
   2569 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2570 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2571 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2572 	    sc->sc_cd_size);
   2573 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2574 
   2575 	/* Disestablish the interrupt handler */
   2576 	if (sc->sc_ih != NULL) {
   2577 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   2578 		sc->sc_ih = NULL;
   2579 	}
   2580 
   2581 	/* Unmap the registers */
   2582 	if (sc->sc_ss) {
   2583 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2584 		sc->sc_ss = 0;
   2585 	}
   2586 
   2587 	if (sc->sc_ios) {
   2588 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2589 		sc->sc_ios = 0;
   2590 	}
   2591 
   2592 	if (sc->sc_tx_lock)
   2593 		mutex_obj_free(sc->sc_tx_lock);
   2594 	if (sc->sc_rx_lock)
   2595 		mutex_obj_free(sc->sc_rx_lock);
   2596 
   2597 	return 0;
   2598 }
   2599 
   2600 static bool
   2601 wm_suspend(device_t self, const pmf_qual_t *qual)
   2602 {
   2603 	struct wm_softc *sc = device_private(self);
   2604 
   2605 	wm_release_manageability(sc);
   2606 	wm_release_hw_control(sc);
   2607 #ifdef WM_WOL
   2608 	wm_enable_wakeup(sc);
   2609 #endif
   2610 
   2611 	return true;
   2612 }
   2613 
   2614 static bool
   2615 wm_resume(device_t self, const pmf_qual_t *qual)
   2616 {
   2617 	struct wm_softc *sc = device_private(self);
   2618 
   2619 	wm_init_manageability(sc);
   2620 
   2621 	return true;
   2622 }
   2623 
   2624 /*
   2625  * wm_watchdog:		[ifnet interface function]
   2626  *
   2627  *	Watchdog timer handler.
   2628  */
   2629 static void
   2630 wm_watchdog(struct ifnet *ifp)
   2631 {
   2632 	struct wm_softc *sc = ifp->if_softc;
   2633 
   2634 	/*
   2635 	 * Since we're using delayed interrupts, sweep up
   2636 	 * before we report an error.
   2637 	 */
   2638 	WM_TX_LOCK(sc);
   2639 	wm_txintr(sc);
   2640 	WM_TX_UNLOCK(sc);
   2641 
   2642 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
   2643 #ifdef WM_DEBUG
   2644 		int i, j;
   2645 		struct wm_txsoft *txs;
   2646 #endif
   2647 		log(LOG_ERR,
   2648 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2649 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
   2650 		    sc->sc_txnext);
   2651 		ifp->if_oerrors++;
   2652 #ifdef WM_DEBUG
    2653 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext;
    2654 		    i = WM_NEXTTXS(sc, i)) {
    2655 			txs = &sc->sc_txsoft[i];
    2656 			printf("txs %d tx %d -> %d\n",
    2657 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2658 			for (j = txs->txs_firstdesc; ;
    2659 			    j = WM_NEXTTX(sc, j)) {
    2660 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2661 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
    2662 				printf("\t %#08x%08x\n",
    2663 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
    2664 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
    2665 				if (j == txs->txs_lastdesc)
    2666 					break;
    2667 			}
    2668 		}
   2669 #endif
   2670 		/* Reset the interface. */
   2671 		(void) wm_init(ifp);
   2672 	}
   2673 
   2674 	/* Try to get more packets going. */
   2675 	ifp->if_start(ifp);
   2676 }
   2677 
   2678 /*
   2679  * wm_tick:
   2680  *
   2681  *	One second timer, used to check link status, sweep up
   2682  *	completed transmit jobs, etc.
   2683  */
   2684 static void
   2685 wm_tick(void *arg)
   2686 {
   2687 	struct wm_softc *sc = arg;
   2688 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2689 #ifndef WM_MPSAFE
   2690 	int s;
   2691 
   2692 	s = splnet();
   2693 #endif
   2694 
   2695 	WM_TX_LOCK(sc);
   2696 
   2697 	if (sc->sc_stopping)
   2698 		goto out;
   2699 
   2700 	if (sc->sc_type >= WM_T_82542_2_1) {
   2701 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2702 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2703 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2704 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2705 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2706 	}
   2707 
   2708 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2709 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2710 	    + CSR_READ(sc, WMREG_CRCERRS)
   2711 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2712 	    + CSR_READ(sc, WMREG_SYMERRC)
   2713 	    + CSR_READ(sc, WMREG_RXERRC)
   2714 	    + CSR_READ(sc, WMREG_SEC)
   2715 	    + CSR_READ(sc, WMREG_CEXTERR)
   2716 	    + CSR_READ(sc, WMREG_RLEC);
   2717 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2718 
   2719 	if (sc->sc_flags & WM_F_HAS_MII)
   2720 		mii_tick(&sc->sc_mii);
   2721 	else if ((sc->sc_type >= WM_T_82575)
   2722 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2723 		wm_serdes_tick(sc);
   2724 	else
   2725 		wm_tbi_tick(sc);
   2726 
   2727 out:
   2728 	WM_TX_UNLOCK(sc);
   2729 #ifndef WM_MPSAFE
   2730 	splx(s);
   2731 #endif
   2732 
   2733 	if (!sc->sc_stopping)
   2734 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2735 }
   2736 
   2737 static int
   2738 wm_ifflags_cb(struct ethercom *ec)
   2739 {
   2740 	struct ifnet *ifp = &ec->ec_if;
   2741 	struct wm_softc *sc = ifp->if_softc;
   2742 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2743 	int rc = 0;
   2744 
   2745 	WM_BOTH_LOCK(sc);
   2746 
   2747 	if (change != 0)
   2748 		sc->sc_if_flags = ifp->if_flags;
   2749 
   2750 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2751 		rc = ENETRESET;
   2752 		goto out;
   2753 	}
   2754 
   2755 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2756 		wm_set_filter(sc);
   2757 
   2758 	wm_set_vlan(sc);
   2759 
   2760 out:
   2761 	WM_BOTH_UNLOCK(sc);
   2762 
   2763 	return rc;
   2764 }
   2765 
   2766 /*
   2767  * wm_ioctl:		[ifnet interface function]
   2768  *
   2769  *	Handle control requests from the operator.
   2770  */
   2771 static int
   2772 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2773 {
   2774 	struct wm_softc *sc = ifp->if_softc;
   2775 	struct ifreq *ifr = (struct ifreq *) data;
   2776 	struct ifaddr *ifa = (struct ifaddr *)data;
   2777 	struct sockaddr_dl *sdl;
   2778 	int s, error;
   2779 
   2780 #ifndef WM_MPSAFE
   2781 	s = splnet();
   2782 #endif
   2783 	switch (cmd) {
   2784 	case SIOCSIFMEDIA:
   2785 	case SIOCGIFMEDIA:
   2786 		WM_BOTH_LOCK(sc);
   2787 		/* Flow control requires full-duplex mode. */
   2788 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2789 		    (ifr->ifr_media & IFM_FDX) == 0)
   2790 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2791 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2792 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2793 				/* We can do both TXPAUSE and RXPAUSE. */
   2794 				ifr->ifr_media |=
   2795 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2796 			}
   2797 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2798 		}
   2799 		WM_BOTH_UNLOCK(sc);
   2800 #ifdef WM_MPSAFE
   2801 		s = splnet();
   2802 #endif
   2803 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2804 #ifdef WM_MPSAFE
   2805 		splx(s);
   2806 #endif
   2807 		break;
   2808 	case SIOCINITIFADDR:
   2809 		WM_BOTH_LOCK(sc);
   2810 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2811 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2812 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2813 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
    2814 			/* The unicast address occupies the first receive address slot */
   2815 			wm_set_filter(sc);
   2816 			error = 0;
   2817 			WM_BOTH_UNLOCK(sc);
   2818 			break;
   2819 		}
   2820 		WM_BOTH_UNLOCK(sc);
   2821 		/*FALLTHROUGH*/
   2822 	default:
   2823 #ifdef WM_MPSAFE
   2824 		s = splnet();
   2825 #endif
   2826 		/* It may call wm_start, so unlock here */
   2827 		error = ether_ioctl(ifp, cmd, data);
   2828 #ifdef WM_MPSAFE
   2829 		splx(s);
   2830 #endif
   2831 		if (error != ENETRESET)
   2832 			break;
   2833 
   2834 		error = 0;
   2835 
   2836 		if (cmd == SIOCSIFCAP) {
   2837 			error = (*ifp->if_init)(ifp);
   2838 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2839 			;
   2840 		else if (ifp->if_flags & IFF_RUNNING) {
   2841 			/*
   2842 			 * Multicast list has changed; set the hardware filter
   2843 			 * accordingly.
   2844 			 */
   2845 			WM_BOTH_LOCK(sc);
   2846 			wm_set_filter(sc);
   2847 			WM_BOTH_UNLOCK(sc);
   2848 		}
   2849 		break;
   2850 	}
   2851 
   2852 #ifndef WM_MPSAFE
   2853 	splx(s);
   2854 #endif
   2855 	return error;
   2856 }
   2857 
   2858 /* MAC address related */
   2859 
   2860 /*
    2861  * Get the offset of the MAC address and return it.
    2862  * If an error occurs, use offset 0.
   2863  */
   2864 static uint16_t
   2865 wm_check_alt_mac_addr(struct wm_softc *sc)
   2866 {
   2867 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2868 	uint16_t offset = NVM_OFF_MACADDR;
   2869 
   2870 	/* Try to read alternative MAC address pointer */
   2871 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2872 		return 0;
   2873 
    2874 	/* Check whether the pointer is valid. */
   2875 	if ((offset == 0x0000) || (offset == 0xffff))
   2876 		return 0;
   2877 
   2878 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2879 	/*
    2880 	 * Check whether the alternative MAC address is valid.  Some
    2881 	 * cards have a non-0xffff pointer but don't actually use an
    2882 	 * alternative MAC address.
    2883 	 *
    2884 	 * A valid address must not have the multicast (group) bit set.
   2885 	 */
   2886 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2887 		if (((myea[0] & 0xff) & 0x01) == 0)
   2888 			return offset; /* Found */
   2889 
   2890 	/* Not found */
   2891 	return 0;
   2892 }
   2893 
   2894 static int
   2895 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2896 {
   2897 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2898 	uint16_t offset = NVM_OFF_MACADDR;
   2899 	int do_invert = 0;
   2900 
   2901 	switch (sc->sc_type) {
   2902 	case WM_T_82580:
   2903 	case WM_T_I350:
   2904 	case WM_T_I354:
   2905 		/* EEPROM Top Level Partitioning */
   2906 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2907 		break;
   2908 	case WM_T_82571:
   2909 	case WM_T_82575:
   2910 	case WM_T_82576:
   2911 	case WM_T_80003:
   2912 	case WM_T_I210:
   2913 	case WM_T_I211:
   2914 		offset = wm_check_alt_mac_addr(sc);
   2915 		if (offset == 0)
   2916 			if ((sc->sc_funcid & 0x01) == 1)
   2917 				do_invert = 1;
   2918 		break;
   2919 	default:
   2920 		if ((sc->sc_funcid & 0x01) == 1)
   2921 			do_invert = 1;
   2922 		break;
   2923 	}
   2924 
   2925 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2926 		myea) != 0)
   2927 		goto bad;
   2928 
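         	/*
         	 * Each 16-bit NVM word holds two bytes of the MAC address,
         	 * low byte first: word 0 supplies bytes 0 and 1, and so on.
         	 */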
   2929 	enaddr[0] = myea[0] & 0xff;
   2930 	enaddr[1] = myea[0] >> 8;
   2931 	enaddr[2] = myea[1] & 0xff;
   2932 	enaddr[3] = myea[1] >> 8;
   2933 	enaddr[4] = myea[2] & 0xff;
   2934 	enaddr[5] = myea[2] >> 8;
   2935 
   2936 	/*
   2937 	 * Toggle the LSB of the MAC address on the second port
   2938 	 * of some dual port cards.
   2939 	 */
   2940 	if (do_invert != 0)
   2941 		enaddr[5] ^= 1;
   2942 
   2943 	return 0;
   2944 
   2945  bad:
   2946 	return -1;
   2947 }
   2948 
   2949 /*
   2950  * wm_set_ral:
   2951  *
    2952  *	Set an entry in the receive address list.
   2953  */
   2954 static void
   2955 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2956 {
   2957 	uint32_t ral_lo, ral_hi;
   2958 
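         	/*
         	 * The low register holds the first four bytes of the address
         	 * and the high register the last two; RAL_AV marks the entry
         	 * valid, so writing a cleared pair disables the slot.
         	 */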
   2959 	if (enaddr != NULL) {
   2960 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2961 		    (enaddr[3] << 24);
   2962 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2963 		ral_hi |= RAL_AV;
   2964 	} else {
   2965 		ral_lo = 0;
   2966 		ral_hi = 0;
   2967 	}
   2968 
   2969 	if (sc->sc_type >= WM_T_82544) {
   2970 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2971 		    ral_lo);
   2972 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2973 		    ral_hi);
   2974 	} else {
   2975 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2976 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2977 	}
   2978 }
   2979 
   2980 /*
   2981  * wm_mchash:
   2982  *
    2983  *	Compute the hash of the multicast address for the 4096-bit
    2984  *	multicast filter (1024-bit on ICH and PCH variants).
   2985  */
   2986 static uint32_t
   2987 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2988 {
   2989 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2990 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2991 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   2992 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   2993 	uint32_t hash;
   2994 
   2995 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2996 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2997 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   2998 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   2999 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3000 		return (hash & 0x3ff);
   3001 	}
   3002 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3003 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3004 
   3005 	return (hash & 0xfff);
   3006 }
   3007 
   3008 /*
   3009  * wm_set_filter:
   3010  *
   3011  *	Set up the receive filter.
   3012  */
   3013 static void
   3014 wm_set_filter(struct wm_softc *sc)
   3015 {
   3016 	struct ethercom *ec = &sc->sc_ethercom;
   3017 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3018 	struct ether_multi *enm;
   3019 	struct ether_multistep step;
   3020 	bus_addr_t mta_reg;
   3021 	uint32_t hash, reg, bit;
   3022 	int i, size;
   3023 
   3024 	if (sc->sc_type >= WM_T_82544)
   3025 		mta_reg = WMREG_CORDOVA_MTA;
   3026 	else
   3027 		mta_reg = WMREG_MTA;
   3028 
   3029 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3030 
   3031 	if (ifp->if_flags & IFF_BROADCAST)
   3032 		sc->sc_rctl |= RCTL_BAM;
   3033 	if (ifp->if_flags & IFF_PROMISC) {
   3034 		sc->sc_rctl |= RCTL_UPE;
   3035 		goto allmulti;
   3036 	}
   3037 
   3038 	/*
   3039 	 * Set the station address in the first RAL slot, and
   3040 	 * clear the remaining slots.
   3041 	 */
   3042 	if (sc->sc_type == WM_T_ICH8)
    3043 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3044 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3045 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3046 	    || (sc->sc_type == WM_T_PCH_LPT))
   3047 		size = WM_RAL_TABSIZE_ICH8;
   3048 	else if (sc->sc_type == WM_T_82575)
   3049 		size = WM_RAL_TABSIZE_82575;
   3050 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3051 		size = WM_RAL_TABSIZE_82576;
   3052 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3053 		size = WM_RAL_TABSIZE_I350;
   3054 	else
   3055 		size = WM_RAL_TABSIZE;
   3056 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3057 	for (i = 1; i < size; i++)
   3058 		wm_set_ral(sc, NULL, i);
   3059 
   3060 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3061 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3062 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   3063 		size = WM_ICH8_MC_TABSIZE;
   3064 	else
   3065 		size = WM_MC_TABSIZE;
   3066 	/* Clear out the multicast table. */
   3067 	for (i = 0; i < size; i++)
   3068 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3069 
   3070 	ETHER_FIRST_MULTI(step, ec, enm);
   3071 	while (enm != NULL) {
   3072 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3073 			/*
   3074 			 * We must listen to a range of multicast addresses.
   3075 			 * For now, just accept all multicasts, rather than
   3076 			 * trying to set only those filter bits needed to match
   3077 			 * the range.  (At this time, the only use of address
   3078 			 * ranges is for IP multicast routing, for which the
   3079 			 * range is big enough to require all bits set.)
   3080 			 */
   3081 			goto allmulti;
   3082 		}
   3083 
   3084 		hash = wm_mchash(sc, enm->enm_addrlo);
   3085 
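         		/*
         		 * The MTA is an array of 32-bit registers (128 for the
         		 * 4096-bit filter, 32 for the 1024-bit ICH/PCH filter);
         		 * the upper hash bits select the register and the low
         		 * five bits select the bit within it.
         		 */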
   3086 		reg = (hash >> 5);
   3087 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3088 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3089 		    || (sc->sc_type == WM_T_PCH2)
   3090 		    || (sc->sc_type == WM_T_PCH_LPT))
   3091 			reg &= 0x1f;
   3092 		else
   3093 			reg &= 0x7f;
   3094 		bit = hash & 0x1f;
   3095 
   3096 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3097 		hash |= 1U << bit;
   3098 
   3099 		/* XXX Hardware bug?? */
   3100 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
   3101 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3102 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3103 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3104 		} else
   3105 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3106 
   3107 		ETHER_NEXT_MULTI(step, enm);
   3108 	}
   3109 
   3110 	ifp->if_flags &= ~IFF_ALLMULTI;
   3111 	goto setit;
   3112 
   3113  allmulti:
   3114 	ifp->if_flags |= IFF_ALLMULTI;
   3115 	sc->sc_rctl |= RCTL_MPE;
   3116 
   3117  setit:
   3118 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3119 }
   3120 
   3121 /* Reset and init related */
   3122 
   3123 static void
   3124 wm_set_vlan(struct wm_softc *sc)
   3125 {
   3126 	/* Deal with VLAN enables. */
   3127 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3128 		sc->sc_ctrl |= CTRL_VME;
   3129 	else
   3130 		sc->sc_ctrl &= ~CTRL_VME;
   3131 
   3132 	/* Write the control registers. */
   3133 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3134 }
   3135 
   3136 static void
   3137 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3138 {
   3139 	uint32_t gcr;
   3140 	pcireg_t ctrl2;
   3141 
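         	/*
         	 * On parts without PCIe capability version 2, fall back to
         	 * the 10ms GCR setting; otherwise program the completion
         	 * timeout range through DCSR2.
         	 */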
   3142 	gcr = CSR_READ(sc, WMREG_GCR);
   3143 
   3144 	/* Only take action if timeout value is defaulted to 0 */
   3145 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3146 		goto out;
   3147 
   3148 	if ((gcr & GCR_CAP_VER2) == 0) {
   3149 		gcr |= GCR_CMPL_TMOUT_10MS;
   3150 		goto out;
   3151 	}
   3152 
   3153 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3154 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3155 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3156 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3157 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3158 
   3159 out:
   3160 	/* Disable completion timeout resend */
   3161 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3162 
   3163 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3164 }
   3165 
   3166 void
   3167 wm_get_auto_rd_done(struct wm_softc *sc)
   3168 {
   3169 	int i;
   3170 
   3171 	/* wait for eeprom to reload */
   3172 	switch (sc->sc_type) {
   3173 	case WM_T_82571:
   3174 	case WM_T_82572:
   3175 	case WM_T_82573:
   3176 	case WM_T_82574:
   3177 	case WM_T_82583:
   3178 	case WM_T_82575:
   3179 	case WM_T_82576:
   3180 	case WM_T_82580:
   3181 	case WM_T_I350:
   3182 	case WM_T_I354:
   3183 	case WM_T_I210:
   3184 	case WM_T_I211:
   3185 	case WM_T_80003:
   3186 	case WM_T_ICH8:
   3187 	case WM_T_ICH9:
   3188 		for (i = 0; i < 10; i++) {
   3189 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3190 				break;
   3191 			delay(1000);
   3192 		}
   3193 		if (i == 10) {
   3194 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3195 			    "complete\n", device_xname(sc->sc_dev));
   3196 		}
   3197 		break;
   3198 	default:
   3199 		break;
   3200 	}
   3201 }
   3202 
   3203 void
   3204 wm_lan_init_done(struct wm_softc *sc)
   3205 {
   3206 	uint32_t reg = 0;
   3207 	int i;
   3208 
   3209 	/* wait for eeprom to reload */
   3210 	switch (sc->sc_type) {
   3211 	case WM_T_ICH10:
   3212 	case WM_T_PCH:
   3213 	case WM_T_PCH2:
   3214 	case WM_T_PCH_LPT:
   3215 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3216 			reg = CSR_READ(sc, WMREG_STATUS);
   3217 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3218 				break;
   3219 			delay(100);
   3220 		}
   3221 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3222 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3223 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3224 		}
   3225 		break;
   3226 	default:
   3227 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3228 		    __func__);
   3229 		break;
   3230 	}
   3231 
   3232 	reg &= ~STATUS_LAN_INIT_DONE;
   3233 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3234 }
   3235 
   3236 void
   3237 wm_get_cfg_done(struct wm_softc *sc)
   3238 {
   3239 	int mask;
   3240 	uint32_t reg;
   3241 	int i;
   3242 
   3243 	/* wait for eeprom to reload */
   3244 	switch (sc->sc_type) {
   3245 	case WM_T_82542_2_0:
   3246 	case WM_T_82542_2_1:
   3247 		/* null */
   3248 		break;
   3249 	case WM_T_82543:
   3250 	case WM_T_82544:
   3251 	case WM_T_82540:
   3252 	case WM_T_82545:
   3253 	case WM_T_82545_3:
   3254 	case WM_T_82546:
   3255 	case WM_T_82546_3:
   3256 	case WM_T_82541:
   3257 	case WM_T_82541_2:
   3258 	case WM_T_82547:
   3259 	case WM_T_82547_2:
   3260 	case WM_T_82573:
   3261 	case WM_T_82574:
   3262 	case WM_T_82583:
   3263 		/* generic */
   3264 		delay(10*1000);
   3265 		break;
   3266 	case WM_T_80003:
   3267 	case WM_T_82571:
   3268 	case WM_T_82572:
   3269 	case WM_T_82575:
   3270 	case WM_T_82576:
   3271 	case WM_T_82580:
   3272 	case WM_T_I350:
   3273 	case WM_T_I354:
   3274 	case WM_T_I210:
   3275 	case WM_T_I211:
   3276 		if (sc->sc_type == WM_T_82571) {
   3277 			/* Only 82571 shares port 0 */
   3278 			mask = EEMNGCTL_CFGDONE_0;
   3279 		} else
   3280 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3281 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3282 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3283 				break;
   3284 			delay(1000);
   3285 		}
   3286 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3287 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3288 				device_xname(sc->sc_dev), __func__));
   3289 		}
   3290 		break;
   3291 	case WM_T_ICH8:
   3292 	case WM_T_ICH9:
   3293 	case WM_T_ICH10:
   3294 	case WM_T_PCH:
   3295 	case WM_T_PCH2:
   3296 	case WM_T_PCH_LPT:
   3297 		delay(10*1000);
   3298 		if (sc->sc_type >= WM_T_ICH10)
   3299 			wm_lan_init_done(sc);
   3300 		else
   3301 			wm_get_auto_rd_done(sc);
   3302 
   3303 		reg = CSR_READ(sc, WMREG_STATUS);
   3304 		if ((reg & STATUS_PHYRA) != 0)
   3305 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3306 		break;
   3307 	default:
   3308 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3309 		    __func__);
   3310 		break;
   3311 	}
   3312 }
   3313 
   3314 /* Init hardware bits */
   3315 void
   3316 wm_initialize_hardware_bits(struct wm_softc *sc)
   3317 {
   3318 	uint32_t tarc0, tarc1, reg;
   3319 
   3320 	/* For 82571 variant, 80003 and ICHs */
   3321 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3322 	    || (sc->sc_type >= WM_T_80003)) {
   3323 
   3324 		/* Transmit Descriptor Control 0 */
   3325 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3326 		reg |= TXDCTL_COUNT_DESC;
   3327 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3328 
   3329 		/* Transmit Descriptor Control 1 */
   3330 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3331 		reg |= TXDCTL_COUNT_DESC;
   3332 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3333 
   3334 		/* TARC0 */
   3335 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3336 		switch (sc->sc_type) {
   3337 		case WM_T_82571:
   3338 		case WM_T_82572:
   3339 		case WM_T_82573:
   3340 		case WM_T_82574:
   3341 		case WM_T_82583:
   3342 		case WM_T_80003:
   3343 			/* Clear bits 30..27 */
   3344 			tarc0 &= ~__BITS(30, 27);
   3345 			break;
   3346 		default:
   3347 			break;
   3348 		}
   3349 
   3350 		switch (sc->sc_type) {
   3351 		case WM_T_82571:
   3352 		case WM_T_82572:
   3353 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3354 
   3355 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3356 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3357 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3358 			/* 8257[12] Errata No.7 */
    3359 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3360 
   3361 			/* TARC1 bit 28 */
   3362 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3363 				tarc1 &= ~__BIT(28);
   3364 			else
   3365 				tarc1 |= __BIT(28);
   3366 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3367 
   3368 			/*
   3369 			 * 8257[12] Errata No.13
    3370 			 * Disable Dynamic Clock Gating.
   3371 			 */
   3372 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3373 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3374 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3375 			break;
   3376 		case WM_T_82573:
   3377 		case WM_T_82574:
   3378 		case WM_T_82583:
   3379 			if ((sc->sc_type == WM_T_82574)
   3380 			    || (sc->sc_type == WM_T_82583))
   3381 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3382 
   3383 			/* Extended Device Control */
   3384 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3385 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3386 			reg |= __BIT(22);	/* Set bit 22 */
   3387 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3388 
   3389 			/* Device Control */
   3390 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3391 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3392 
   3393 			/* PCIe Control Register */
   3394 			if ((sc->sc_type == WM_T_82574)
   3395 			    || (sc->sc_type == WM_T_82583)) {
   3396 				/*
   3397 				 * Document says this bit must be set for
   3398 				 * proper operation.
   3399 				 */
   3400 				reg = CSR_READ(sc, WMREG_GCR);
   3401 				reg |= __BIT(22);
   3402 				CSR_WRITE(sc, WMREG_GCR, reg);
   3403 
   3404 				/*
    3405 				 * Apply a workaround for the hardware errata
    3406 				 * documented in the errata docs.  It fixes an
    3407 				 * issue where some error-prone or unreliable
    3408 				 * PCIe completions occur, particularly with
    3409 				 * ASPM enabled.  Without the fix, the issue
    3410 				 * can cause Tx timeouts.
   3411 				 */
   3412 				reg = CSR_READ(sc, WMREG_GCR2);
   3413 				reg |= __BIT(0);
   3414 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3415 			}
   3416 			break;
   3417 		case WM_T_80003:
   3418 			/* TARC0 */
   3419 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3420 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3421 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3422 
   3423 			/* TARC1 bit 28 */
   3424 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3425 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3426 				tarc1 &= ~__BIT(28);
   3427 			else
   3428 				tarc1 |= __BIT(28);
   3429 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3430 			break;
   3431 		case WM_T_ICH8:
   3432 		case WM_T_ICH9:
   3433 		case WM_T_ICH10:
   3434 		case WM_T_PCH:
   3435 		case WM_T_PCH2:
   3436 		case WM_T_PCH_LPT:
   3437 			/* TARC 0 */
   3438 			if (sc->sc_type == WM_T_ICH8) {
   3439 				/* Set TARC0 bits 29 and 28 */
   3440 				tarc0 |= __BITS(29, 28);
   3441 			}
   3442 			/* Set TARC0 bits 23,24,26,27 */
   3443 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3444 
   3445 			/* CTRL_EXT */
   3446 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3447 			reg |= __BIT(22);	/* Set bit 22 */
   3448 			/*
   3449 			 * Enable PHY low-power state when MAC is at D3
   3450 			 * w/o WoL
   3451 			 */
   3452 			if (sc->sc_type >= WM_T_PCH)
   3453 				reg |= CTRL_EXT_PHYPDEN;
   3454 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3455 
   3456 			/* TARC1 */
   3457 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3458 			/* bit 28 */
   3459 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3460 				tarc1 &= ~__BIT(28);
   3461 			else
   3462 				tarc1 |= __BIT(28);
   3463 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3464 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3465 
   3466 			/* Device Status */
   3467 			if (sc->sc_type == WM_T_ICH8) {
   3468 				reg = CSR_READ(sc, WMREG_STATUS);
   3469 				reg &= ~__BIT(31);
   3470 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3471 
   3472 			}
   3473 
   3474 			/*
   3475 			 * Work-around descriptor data corruption issue during
   3476 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3477 			 * capability.
   3478 			 */
   3479 			reg = CSR_READ(sc, WMREG_RFCTL);
   3480 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3481 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3482 			break;
   3483 		default:
   3484 			break;
   3485 		}
   3486 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3487 
   3488 		/*
   3489 		 * 8257[12] Errata No.52 and some others.
   3490 		 * Avoid RSS Hash Value bug.
   3491 		 */
   3492 		switch (sc->sc_type) {
   3493 		case WM_T_82571:
   3494 		case WM_T_82572:
   3495 		case WM_T_82573:
   3496 		case WM_T_80003:
   3497 		case WM_T_ICH8:
   3498 			reg = CSR_READ(sc, WMREG_RFCTL);
    3499 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3500 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3501 			break;
   3502 		default:
   3503 			break;
   3504 		}
   3505 	}
   3506 }
   3507 
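         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Convert the RXPBS size field, which the 82580 family encodes
          *	as a table index, into a packet buffer size; indices outside
          *	the lookup table map to 0.
          */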
   3508 static uint32_t
   3509 wm_rxpbs_adjust_82580(uint32_t val)
   3510 {
   3511 	uint32_t rv = 0;
   3512 
   3513 	if (val < __arraycount(wm_82580_rxpbs_table))
   3514 		rv = wm_82580_rxpbs_table[val];
   3515 
   3516 	return rv;
   3517 }
   3518 
   3519 /*
   3520  * wm_reset:
   3521  *
   3522  *	Reset the i82542 chip.
   3523  */
   3524 static void
   3525 wm_reset(struct wm_softc *sc)
   3526 {
   3527 	int phy_reset = 0;
   3528 	int error = 0;
   3529 	uint32_t reg, mask;
   3530 
   3531 	/*
   3532 	 * Allocate on-chip memory according to the MTU size.
   3533 	 * The Packet Buffer Allocation register must be written
   3534 	 * before the chip is reset.
   3535 	 */
   3536 	switch (sc->sc_type) {
   3537 	case WM_T_82547:
   3538 	case WM_T_82547_2:
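         		/*
         		 * The on-chip packet buffer is shared between the Rx
         		 * packet buffer (sc_pba) and the Tx FIFO, so record the
         		 * FIFO geometry for the 82547 Tx stall workaround.
         		 */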
   3539 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3540 		    PBA_22K : PBA_30K;
   3541 		sc->sc_txfifo_head = 0;
   3542 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3543 		sc->sc_txfifo_size =
   3544 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3545 		sc->sc_txfifo_stall = 0;
   3546 		break;
   3547 	case WM_T_82571:
   3548 	case WM_T_82572:
    3549 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3550 	case WM_T_80003:
   3551 		sc->sc_pba = PBA_32K;
   3552 		break;
   3553 	case WM_T_82573:
   3554 		sc->sc_pba = PBA_12K;
   3555 		break;
   3556 	case WM_T_82574:
   3557 	case WM_T_82583:
   3558 		sc->sc_pba = PBA_20K;
   3559 		break;
   3560 	case WM_T_82576:
   3561 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3562 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3563 		break;
   3564 	case WM_T_82580:
   3565 	case WM_T_I350:
   3566 	case WM_T_I354:
   3567 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3568 		break;
   3569 	case WM_T_I210:
   3570 	case WM_T_I211:
   3571 		sc->sc_pba = PBA_34K;
   3572 		break;
   3573 	case WM_T_ICH8:
   3574 		/* Workaround for a bit corruption issue in FIFO memory */
   3575 		sc->sc_pba = PBA_8K;
   3576 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3577 		break;
   3578 	case WM_T_ICH9:
   3579 	case WM_T_ICH10:
   3580 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3581 		    PBA_14K : PBA_10K;
   3582 		break;
   3583 	case WM_T_PCH:
   3584 	case WM_T_PCH2:
   3585 	case WM_T_PCH_LPT:
   3586 		sc->sc_pba = PBA_26K;
   3587 		break;
   3588 	default:
   3589 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3590 		    PBA_40K : PBA_48K;
   3591 		break;
   3592 	}
   3593 	/*
    3594 	 * Only old or non-multiqueue devices have the PBA register.
   3595 	 * XXX Need special handling for 82575.
   3596 	 */
   3597 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3598 	    || (sc->sc_type == WM_T_82575))
   3599 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3600 
   3601 	/* Prevent the PCI-E bus from sticking */
   3602 	if (sc->sc_flags & WM_F_PCIE) {
   3603 		int timeout = 800;
   3604 
   3605 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3606 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3607 
   3608 		while (timeout--) {
   3609 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3610 			    == 0)
   3611 				break;
   3612 			delay(100);
   3613 		}
   3614 	}
   3615 
   3616 	/* Set the completion timeout for interface */
   3617 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3618 	    || (sc->sc_type == WM_T_82580)
   3619 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3620 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3621 		wm_set_pcie_completion_timeout(sc);
   3622 
   3623 	/* Clear interrupt */
   3624 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3625 
   3626 	/* Stop the transmit and receive processes. */
   3627 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3628 	sc->sc_rctl &= ~RCTL_EN;
   3629 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3630 	CSR_WRITE_FLUSH(sc);
   3631 
   3632 	/* XXX set_tbi_sbp_82543() */
   3633 
   3634 	delay(10*1000);
   3635 
   3636 	/* Must acquire the MDIO ownership before MAC reset */
   3637 	switch (sc->sc_type) {
   3638 	case WM_T_82573:
   3639 	case WM_T_82574:
   3640 	case WM_T_82583:
   3641 		error = wm_get_hw_semaphore_82573(sc);
   3642 		break;
   3643 	default:
   3644 		break;
   3645 	}
   3646 
   3647 	/*
   3648 	 * 82541 Errata 29? & 82547 Errata 28?
    3649 	 * See also the description of the PHY_RST bit in the CTRL register
   3650 	 * in 8254x_GBe_SDM.pdf.
   3651 	 */
   3652 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3653 		CSR_WRITE(sc, WMREG_CTRL,
   3654 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3655 		CSR_WRITE_FLUSH(sc);
   3656 		delay(5000);
   3657 	}
   3658 
   3659 	switch (sc->sc_type) {
   3660 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3661 	case WM_T_82541:
   3662 	case WM_T_82541_2:
   3663 	case WM_T_82547:
   3664 	case WM_T_82547_2:
   3665 		/*
   3666 		 * On some chipsets, a reset through a memory-mapped write
   3667 		 * cycle can cause the chip to reset before completing the
    3668 		 * write cycle.  This causes a major headache that can be
   3669 		 * avoided by issuing the reset via indirect register writes
   3670 		 * through I/O space.
   3671 		 *
   3672 		 * So, if we successfully mapped the I/O BAR at attach time,
   3673 		 * use that.  Otherwise, try our luck with a memory-mapped
   3674 		 * reset.
   3675 		 */
   3676 		if (sc->sc_flags & WM_F_IOH_VALID)
   3677 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3678 		else
   3679 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3680 		break;
   3681 	case WM_T_82545_3:
   3682 	case WM_T_82546_3:
   3683 		/* Use the shadow control register on these chips. */
   3684 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3685 		break;
   3686 	case WM_T_80003:
   3687 		mask = swfwphysem[sc->sc_funcid];
   3688 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3689 		wm_get_swfw_semaphore(sc, mask);
   3690 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3691 		wm_put_swfw_semaphore(sc, mask);
   3692 		break;
   3693 	case WM_T_ICH8:
   3694 	case WM_T_ICH9:
   3695 	case WM_T_ICH10:
   3696 	case WM_T_PCH:
   3697 	case WM_T_PCH2:
   3698 	case WM_T_PCH_LPT:
   3699 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3700 		if (wm_check_reset_block(sc) == 0) {
   3701 			/*
   3702 			 * Gate automatic PHY configuration by hardware on
   3703 			 * non-managed 82579
   3704 			 */
   3705 			if ((sc->sc_type == WM_T_PCH2)
   3706 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3707 				!= 0))
   3708 				wm_gate_hw_phy_config_ich8lan(sc, 1);
    3709 
   3711 			reg |= CTRL_PHY_RESET;
   3712 			phy_reset = 1;
   3713 		}
   3714 		wm_get_swfwhw_semaphore(sc);
   3715 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3716 		/* Don't insert a completion barrier while resetting */
   3717 		delay(20*1000);
   3718 		wm_put_swfwhw_semaphore(sc);
   3719 		break;
   3720 	case WM_T_82580:
   3721 	case WM_T_I350:
   3722 	case WM_T_I354:
   3723 	case WM_T_I210:
   3724 	case WM_T_I211:
   3725 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3726 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3727 			CSR_WRITE_FLUSH(sc);
   3728 		delay(5000);
   3729 		break;
   3730 	case WM_T_82542_2_0:
   3731 	case WM_T_82542_2_1:
   3732 	case WM_T_82543:
   3733 	case WM_T_82540:
   3734 	case WM_T_82545:
   3735 	case WM_T_82546:
   3736 	case WM_T_82571:
   3737 	case WM_T_82572:
   3738 	case WM_T_82573:
   3739 	case WM_T_82574:
   3740 	case WM_T_82575:
   3741 	case WM_T_82576:
   3742 	case WM_T_82583:
   3743 	default:
   3744 		/* Everything else can safely use the documented method. */
   3745 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3746 		break;
   3747 	}
   3748 
   3749 	/* Must release the MDIO ownership after MAC reset */
   3750 	switch (sc->sc_type) {
   3751 	case WM_T_82573:
   3752 	case WM_T_82574:
   3753 	case WM_T_82583:
   3754 		if (error == 0)
   3755 			wm_put_hw_semaphore_82573(sc);
   3756 		break;
   3757 	default:
   3758 		break;
   3759 	}
   3760 
   3761 	if (phy_reset != 0)
   3762 		wm_get_cfg_done(sc);
   3763 
   3764 	/* reload EEPROM */
   3765 	switch (sc->sc_type) {
   3766 	case WM_T_82542_2_0:
   3767 	case WM_T_82542_2_1:
   3768 	case WM_T_82543:
   3769 	case WM_T_82544:
   3770 		delay(10);
   3771 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3772 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3773 		CSR_WRITE_FLUSH(sc);
   3774 		delay(2000);
   3775 		break;
   3776 	case WM_T_82540:
   3777 	case WM_T_82545:
   3778 	case WM_T_82545_3:
   3779 	case WM_T_82546:
   3780 	case WM_T_82546_3:
   3781 		delay(5*1000);
   3782 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3783 		break;
   3784 	case WM_T_82541:
   3785 	case WM_T_82541_2:
   3786 	case WM_T_82547:
   3787 	case WM_T_82547_2:
   3788 		delay(20000);
   3789 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3790 		break;
   3791 	case WM_T_82571:
   3792 	case WM_T_82572:
   3793 	case WM_T_82573:
   3794 	case WM_T_82574:
   3795 	case WM_T_82583:
   3796 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3797 			delay(10);
   3798 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3799 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3800 			CSR_WRITE_FLUSH(sc);
   3801 		}
   3802 		/* check EECD_EE_AUTORD */
   3803 		wm_get_auto_rd_done(sc);
   3804 		/*
    3805 		 * PHY configuration from the NVM starts just after
    3806 		 * EECD_AUTO_RD is set.
   3807 		 */
   3808 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3809 		    || (sc->sc_type == WM_T_82583))
   3810 			delay(25*1000);
   3811 		break;
   3812 	case WM_T_82575:
   3813 	case WM_T_82576:
   3814 	case WM_T_82580:
   3815 	case WM_T_I350:
   3816 	case WM_T_I354:
   3817 	case WM_T_I210:
   3818 	case WM_T_I211:
   3819 	case WM_T_80003:
   3820 		/* check EECD_EE_AUTORD */
   3821 		wm_get_auto_rd_done(sc);
   3822 		break;
   3823 	case WM_T_ICH8:
   3824 	case WM_T_ICH9:
   3825 	case WM_T_ICH10:
   3826 	case WM_T_PCH:
   3827 	case WM_T_PCH2:
   3828 	case WM_T_PCH_LPT:
   3829 		break;
   3830 	default:
   3831 		panic("%s: unknown type\n", __func__);
   3832 	}
   3833 
   3834 	/* Check whether EEPROM is present or not */
   3835 	switch (sc->sc_type) {
   3836 	case WM_T_82575:
   3837 	case WM_T_82576:
   3838 	case WM_T_82580:
   3839 	case WM_T_I350:
   3840 	case WM_T_I354:
   3841 	case WM_T_ICH8:
   3842 	case WM_T_ICH9:
   3843 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3844 			/* Not found */
   3845 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3846 			if (sc->sc_type == WM_T_82575)
   3847 				wm_reset_init_script_82575(sc);
   3848 		}
   3849 		break;
   3850 	default:
   3851 		break;
   3852 	}
   3853 
   3854 	if ((sc->sc_type == WM_T_82580)
   3855 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3856 		/* clear global device reset status bit */
   3857 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3858 	}
   3859 
   3860 	/* Clear any pending interrupt events. */
   3861 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
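         	/* Reading ICR acknowledges and clears any latched causes. */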
   3862 	reg = CSR_READ(sc, WMREG_ICR);
   3863 
   3864 	/* reload sc_ctrl */
   3865 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3866 
   3867 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   3868 		wm_set_eee_i350(sc);
   3869 
   3870 	/* dummy read from WUC */
   3871 	if (sc->sc_type == WM_T_PCH)
   3872 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3873 	/*
   3874 	 * For PCH, this write will make sure that any noise will be detected
   3875 	 * as a CRC error and be dropped rather than show up as a bad packet
    3876 	 * to the DMA engine.
   3877 	 */
   3878 	if (sc->sc_type == WM_T_PCH)
   3879 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3880 
   3881 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3882 		CSR_WRITE(sc, WMREG_WUC, 0);
   3883 
   3884 	wm_reset_mdicnfg_82580(sc);
   3885 
   3886 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   3887 		wm_pll_workaround_i210(sc);
   3888 }
   3889 
   3890 /*
   3891  * wm_add_rxbuf:
   3892  *
    3893  *	Add a receive buffer to the indicated descriptor.
   3894  */
   3895 static int
   3896 wm_add_rxbuf(struct wm_softc *sc, int idx)
   3897 {
   3898 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
   3899 	struct mbuf *m;
   3900 	int error;
   3901 
   3902 	KASSERT(WM_RX_LOCKED(sc));
   3903 
   3904 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3905 	if (m == NULL)
   3906 		return ENOBUFS;
   3907 
   3908 	MCLGET(m, M_DONTWAIT);
   3909 	if ((m->m_flags & M_EXT) == 0) {
   3910 		m_freem(m);
   3911 		return ENOBUFS;
   3912 	}
   3913 
   3914 	if (rxs->rxs_mbuf != NULL)
   3915 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3916 
   3917 	rxs->rxs_mbuf = m;
   3918 
   3919 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3920 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   3921 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   3922 	if (error) {
   3923 		/* XXX XXX XXX */
   3924 		aprint_error_dev(sc->sc_dev,
   3925 		    "unable to load rx DMA map %d, error = %d\n",
   3926 		    idx, error);
   3927 		panic("wm_add_rxbuf");
   3928 	}
   3929 
   3930 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   3931 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   3932 
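         	/*
         	 * On 82575 and newer parts the descriptor may only be handed
         	 * to the hardware once the receiver is enabled; see the
         	 * matching logic in wm_init_locked().
         	 */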
   3933 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3934 		if ((sc->sc_rctl & RCTL_EN) != 0)
   3935 			WM_INIT_RXDESC(sc, idx);
   3936 	} else
   3937 		WM_INIT_RXDESC(sc, idx);
   3938 
   3939 	return 0;
   3940 }
   3941 
   3942 /*
   3943  * wm_rxdrain:
   3944  *
   3945  *	Drain the receive queue.
   3946  */
   3947 static void
   3948 wm_rxdrain(struct wm_softc *sc)
   3949 {
   3950 	struct wm_rxsoft *rxs;
   3951 	int i;
   3952 
   3953 	KASSERT(WM_RX_LOCKED(sc));
   3954 
   3955 	for (i = 0; i < WM_NRXDESC; i++) {
   3956 		rxs = &sc->sc_rxsoft[i];
   3957 		if (rxs->rxs_mbuf != NULL) {
   3958 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3959 			m_freem(rxs->rxs_mbuf);
   3960 			rxs->rxs_mbuf = NULL;
   3961 		}
   3962 	}
   3963 }
   3964 
   3965 /*
   3966  * wm_init:		[ifnet interface function]
   3967  *
   3968  *	Initialize the interface.
   3969  */
   3970 static int
   3971 wm_init(struct ifnet *ifp)
   3972 {
   3973 	struct wm_softc *sc = ifp->if_softc;
   3974 	int ret;
   3975 
   3976 	WM_BOTH_LOCK(sc);
   3977 	ret = wm_init_locked(ifp);
   3978 	WM_BOTH_UNLOCK(sc);
   3979 
   3980 	return ret;
   3981 }
   3982 
   3983 static int
   3984 wm_init_locked(struct ifnet *ifp)
   3985 {
   3986 	struct wm_softc *sc = ifp->if_softc;
   3987 	struct wm_rxsoft *rxs;
   3988 	int i, j, trynum, error = 0;
   3989 	uint32_t reg;
   3990 
   3991 	KASSERT(WM_BOTH_LOCKED(sc));
   3992 	/*
    3993 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    3994 	 * There is a small but measurable benefit to avoiding the adjustment
   3995 	 * of the descriptor so that the headers are aligned, for normal mtu,
   3996 	 * on such platforms.  One possibility is that the DMA itself is
   3997 	 * slightly more efficient if the front of the entire packet (instead
   3998 	 * of the front of the headers) is aligned.
   3999 	 *
   4000 	 * Note we must always set align_tweak to 0 if we are using
   4001 	 * jumbo frames.
   4002 	 */
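         	/*
         	 * With a tweak of 2, the 14-byte Ethernet header ends on a
         	 * 4-byte boundary, so the IP header that follows is aligned.
         	 */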
   4003 #ifdef __NO_STRICT_ALIGNMENT
   4004 	sc->sc_align_tweak = 0;
   4005 #else
   4006 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4007 		sc->sc_align_tweak = 0;
   4008 	else
   4009 		sc->sc_align_tweak = 2;
   4010 #endif /* __NO_STRICT_ALIGNMENT */
   4011 
   4012 	/* Cancel any pending I/O. */
   4013 	wm_stop_locked(ifp, 0);
   4014 
   4015 	/* update statistics before reset */
   4016 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4017 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4018 
   4019 	/* Reset the chip to a known state. */
   4020 	wm_reset(sc);
   4021 
   4022 	switch (sc->sc_type) {
   4023 	case WM_T_82571:
   4024 	case WM_T_82572:
   4025 	case WM_T_82573:
   4026 	case WM_T_82574:
   4027 	case WM_T_82583:
   4028 	case WM_T_80003:
   4029 	case WM_T_ICH8:
   4030 	case WM_T_ICH9:
   4031 	case WM_T_ICH10:
   4032 	case WM_T_PCH:
   4033 	case WM_T_PCH2:
   4034 	case WM_T_PCH_LPT:
   4035 		if (wm_check_mng_mode(sc) != 0)
   4036 			wm_get_hw_control(sc);
   4037 		break;
   4038 	default:
   4039 		break;
   4040 	}
   4041 
   4042 	/* Init hardware bits */
   4043 	wm_initialize_hardware_bits(sc);
   4044 
   4045 	/* Reset the PHY. */
   4046 	if (sc->sc_flags & WM_F_HAS_MII)
   4047 		wm_gmii_reset(sc);
   4048 
   4049 	/* Calculate (E)ITR value */
   4050 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4051 		sc->sc_itr = 450;	/* For EITR */
   4052 	} else if (sc->sc_type >= WM_T_82543) {
   4053 		/*
   4054 		 * Set up the interrupt throttling register (units of 256ns)
   4055 		 * Note that a footnote in Intel's documentation says this
   4056 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    4057 		 * or 10Mbit mode.  Empirically, this also appears to be true
    4058 		 * for the 1024ns units of the other
   4059 		 * interrupt-related timer registers -- so, really, we ought
   4060 		 * to divide this value by 4 when the link speed is low.
   4061 		 *
   4062 		 * XXX implement this division at link speed change!
   4063 		 */
   4064 
   4065 		/*
   4066 		 * For N interrupts/sec, set this value to:
   4067 		 * 1000000000 / (N * 256).  Note that we set the
   4068 		 * absolute and packet timer values to this value
   4069 		 * divided by 4 to get "simple timer" behavior.
   4070 		 */
   4071 
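         		/* e.g. 1000000000 / (1500 * 256) ~= 2604 ints/sec */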
   4072 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4073 	}
   4074 
   4075 	/* Initialize the transmit descriptor ring. */
   4076 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
   4077 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
   4078 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   4079 	sc->sc_txfree = WM_NTXDESC(sc);
   4080 	sc->sc_txnext = 0;
   4081 
   4082 	if (sc->sc_type < WM_T_82543) {
   4083 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
   4084 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
   4085 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
   4086 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   4087 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   4088 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   4089 	} else {
   4090 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
   4091 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
   4092 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
   4093 		CSR_WRITE(sc, WMREG_TDH, 0);
   4094 
   4095 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4096 			/*
   4097 			 * Don't write TDT before TCTL.EN is set.
    4098 			 * See the documentation.
   4099 			 */
   4100 			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
   4101 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   4102 			    | TXDCTL_WTHRESH(0));
   4103 		else {
   4104 			/* ITR / 4 */
   4105 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   4106 			if (sc->sc_type >= WM_T_82540) {
   4107 				/* should be same */
   4108 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   4109 			}
   4110 
   4111 			CSR_WRITE(sc, WMREG_TDT, 0);
   4112 			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
   4113 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   4114 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
   4115 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   4116 		}
   4117 	}
   4118 
   4119 	/* Initialize the transmit job descriptors. */
   4120 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
   4121 		sc->sc_txsoft[i].txs_mbuf = NULL;
   4122 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
   4123 	sc->sc_txsnext = 0;
   4124 	sc->sc_txsdirty = 0;
   4125 
   4126 	/*
   4127 	 * Initialize the receive descriptor and receive job
   4128 	 * descriptor rings.
   4129 	 */
   4130 	if (sc->sc_type < WM_T_82543) {
   4131 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
   4132 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
   4133 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
   4134 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   4135 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   4136 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   4137 
   4138 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   4139 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   4140 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   4141 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   4142 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   4143 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   4144 	} else {
   4145 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
   4146 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
   4147 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
   4148 
   4149 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4150 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    4151 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   4152 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
   4153 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   4154 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
   4155 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   4156 			    | RXDCTL_WTHRESH(1));
   4157 		} else {
   4158 			CSR_WRITE(sc, WMREG_RDH, 0);
   4159 			CSR_WRITE(sc, WMREG_RDT, 0);
   4160 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
   4161 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
   4162 		}
   4163 	}
   4164 	for (i = 0; i < WM_NRXDESC; i++) {
   4165 		rxs = &sc->sc_rxsoft[i];
   4166 		if (rxs->rxs_mbuf == NULL) {
   4167 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
   4168 				log(LOG_ERR, "%s: unable to allocate or map "
   4169 				    "rx buffer %d, error = %d\n",
   4170 				    device_xname(sc->sc_dev), i, error);
   4171 				/*
   4172 				 * XXX Should attempt to run with fewer receive
   4173 				 * XXX buffers instead of just failing.
   4174 				 */
   4175 				wm_rxdrain(sc);
   4176 				goto out;
   4177 			}
   4178 		} else {
   4179 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4180 				WM_INIT_RXDESC(sc, i);
   4181 			/*
    4182 			 * For 82575 and newer devices, the RX descriptors
    4183 			 * must be initialized after the setting of RCTL.EN in
    4184 			 * wm_set_filter().
   4185 			 */
   4186 		}
   4187 	}
   4188 	sc->sc_rxptr = 0;
   4189 	sc->sc_rxdiscard = 0;
   4190 	WM_RXCHAIN_RESET(sc);
   4191 
   4192 	/*
   4193 	 * Clear out the VLAN table -- we don't use it (yet).
   4194 	 */
   4195 	CSR_WRITE(sc, WMREG_VET, 0);
   4196 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4197 		trynum = 10; /* Due to hw errata */
   4198 	else
   4199 		trynum = 1;
   4200 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4201 		for (j = 0; j < trynum; j++)
   4202 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4203 
   4204 	/*
   4205 	 * Set up flow-control parameters.
   4206 	 *
   4207 	 * XXX Values could probably stand some tuning.
   4208 	 */
   4209 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4210 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4211 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   4212 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4213 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4214 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4215 	}
   4216 
   4217 	sc->sc_fcrtl = FCRTL_DFLT;
   4218 	if (sc->sc_type < WM_T_82543) {
   4219 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4220 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4221 	} else {
   4222 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4223 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4224 	}
   4225 
   4226 	if (sc->sc_type == WM_T_80003)
   4227 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4228 	else
   4229 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4230 
   4231 	/* Writes the control register. */
   4232 	wm_set_vlan(sc);
   4233 
   4234 	if (sc->sc_flags & WM_F_HAS_MII) {
   4235 		int val;
   4236 
   4237 		switch (sc->sc_type) {
   4238 		case WM_T_80003:
   4239 		case WM_T_ICH8:
   4240 		case WM_T_ICH9:
   4241 		case WM_T_ICH10:
   4242 		case WM_T_PCH:
   4243 		case WM_T_PCH2:
   4244 		case WM_T_PCH_LPT:
   4245 			/*
    4246 			 * Set the MAC to wait the maximum time between each
    4247 			 * iteration and increase the max iterations when
    4248 			 * polling the PHY; this fixes erroneous timeouts at
   4249 			 * 10Mbps.
   4250 			 */
   4251 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4252 			    0xFFFF);
   4253 			val = wm_kmrn_readreg(sc,
   4254 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   4255 			val |= 0x3F;
   4256 			wm_kmrn_writereg(sc,
   4257 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4258 			break;
   4259 		default:
   4260 			break;
   4261 		}
   4262 
   4263 		if (sc->sc_type == WM_T_80003) {
   4264 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4265 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4266 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4267 
    4268 			/* Bypass RX and TX FIFOs */
   4269 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4270 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4271 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4272 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4273 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4274 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4275 		}
   4276 	}
   4277 #if 0
   4278 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4279 #endif
   4280 
   4281 	/* Set up checksum offload parameters. */
   4282 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4283 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4284 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4285 		reg |= RXCSUM_IPOFL;
   4286 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4287 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4288 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4289 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4290 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4291 
   4292 	/* Set up the interrupt registers. */
   4293 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4294 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4295 	    ICR_RXO | ICR_RXT0;
   4296 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4297 
   4298 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4299 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4300 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   4301 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4302 		reg |= KABGTXD_BGSQLBIAS;
   4303 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4304 	}
   4305 
   4306 	/* Set up the inter-packet gap. */
   4307 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4308 
   4309 	if (sc->sc_type >= WM_T_82543) {
   4310 		/*
   4311 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
   4312 		 * the multi queue function with MSI-X.
   4313 		 */
   4314 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4315 			CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
   4316 		else
   4317 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4318 	}
   4319 
   4320 	/* Set the VLAN ethernetype. */
   4321 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4322 
   4323 	/*
   4324 	 * Set up the transmit control register; we start out with
    4325 	 * a collision distance suitable for FDX, but update it when
   4326 	 * we resolve the media type.
   4327 	 */
   4328 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4329 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4330 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4331 	if (sc->sc_type >= WM_T_82571)
   4332 		sc->sc_tctl |= TCTL_MULR;
   4333 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4334 
   4335 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    4336 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   4337 		CSR_WRITE(sc, WMREG_TDT, 0);
   4338 	}
   4339 
   4340 	if (sc->sc_type == WM_T_80003) {
   4341 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4342 		reg &= ~TCTL_EXT_GCEX_MASK;
   4343 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4344 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4345 	}
   4346 
   4347 	/* Set the media. */
   4348 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4349 		goto out;
   4350 
   4351 	/* Configure for OS presence */
   4352 	wm_init_manageability(sc);
   4353 
   4354 	/*
   4355 	 * Set up the receive control register; we actually program
   4356 	 * the register when we set the receive filter.  Use multicast
   4357 	 * address offset type 0.
   4358 	 *
   4359 	 * Only the i82544 has the ability to strip the incoming
   4360 	 * CRC, so we don't enable that feature.
   4361 	 */
   4362 	sc->sc_mchash_type = 0;
   4363 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4364 	    | RCTL_MO(sc->sc_mchash_type);
   4365 
   4366 	/*
   4367 	 * The I350 has a bug where it always strips the CRC whether
    4368 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   4369 	 */
   4370 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4371 	    || (sc->sc_type == WM_T_I210))
   4372 		sc->sc_rctl |= RCTL_SECRC;
   4373 
   4374 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4375 	    && (ifp->if_mtu > ETHERMTU)) {
   4376 		sc->sc_rctl |= RCTL_LPE;
   4377 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4378 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4379 	}
   4380 
   4381 	if (MCLBYTES == 2048) {
   4382 		sc->sc_rctl |= RCTL_2k;
   4383 	} else {
   4384 		if (sc->sc_type >= WM_T_82543) {
   4385 			switch (MCLBYTES) {
   4386 			case 4096:
   4387 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4388 				break;
   4389 			case 8192:
   4390 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4391 				break;
   4392 			case 16384:
   4393 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4394 				break;
   4395 			default:
   4396 				panic("wm_init: MCLBYTES %d unsupported",
   4397 				    MCLBYTES);
   4398 				break;
   4399 			}
   4400 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   4401 	}
   4402 
   4403 	/* Set the receive filter. */
   4404 	wm_set_filter(sc);
   4405 
   4406 	/* Enable ECC */
   4407 	switch (sc->sc_type) {
   4408 	case WM_T_82571:
   4409 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4410 		reg |= PBA_ECC_CORR_EN;
   4411 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4412 		break;
   4413 	case WM_T_PCH_LPT:
   4414 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4415 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4416 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4417 
   4418 		reg = CSR_READ(sc, WMREG_CTRL);
   4419 		reg |= CTRL_MEHE;
   4420 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4421 		break;
   4422 	default:
   4423 		break;
   4424 	}
   4425 
   4426 	/* On 575 and later set RDT only if RX enabled */
   4427 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4428 		for (i = 0; i < WM_NRXDESC; i++)
   4429 			WM_INIT_RXDESC(sc, i);
   4430 
   4431 	sc->sc_stopping = false;
   4432 
   4433 	/* Start the one second link check clock. */
   4434 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4435 
   4436 	/* ...all done! */
   4437 	ifp->if_flags |= IFF_RUNNING;
   4438 	ifp->if_flags &= ~IFF_OACTIVE;
   4439 
   4440  out:
   4441 	sc->sc_if_flags = ifp->if_flags;
   4442 	if (error)
   4443 		log(LOG_ERR, "%s: interface not running\n",
   4444 		    device_xname(sc->sc_dev));
   4445 	return error;
   4446 }
   4447 
   4448 /*
   4449  * wm_stop:		[ifnet interface function]
   4450  *
   4451  *	Stop transmission on the interface.
   4452  */
   4453 static void
   4454 wm_stop(struct ifnet *ifp, int disable)
   4455 {
   4456 	struct wm_softc *sc = ifp->if_softc;
   4457 
   4458 	WM_BOTH_LOCK(sc);
   4459 	wm_stop_locked(ifp, disable);
   4460 	WM_BOTH_UNLOCK(sc);
   4461 }
   4462 
   4463 static void
   4464 wm_stop_locked(struct ifnet *ifp, int disable)
   4465 {
   4466 	struct wm_softc *sc = ifp->if_softc;
   4467 	struct wm_txsoft *txs;
   4468 	int i;
   4469 
   4470 	KASSERT(WM_BOTH_LOCKED(sc));
   4471 
   4472 	sc->sc_stopping = true;
   4473 
   4474 	/* Stop the one second clock. */
   4475 	callout_stop(&sc->sc_tick_ch);
   4476 
   4477 	/* Stop the 82547 Tx FIFO stall check timer. */
   4478 	if (sc->sc_type == WM_T_82547)
   4479 		callout_stop(&sc->sc_txfifo_ch);
   4480 
   4481 	if (sc->sc_flags & WM_F_HAS_MII) {
   4482 		/* Down the MII. */
   4483 		mii_down(&sc->sc_mii);
   4484 	} else {
   4485 #if 0
   4486 		/* Should we clear PHY's status properly? */
   4487 		wm_reset(sc);
   4488 #endif
   4489 	}
   4490 
   4491 	/* Stop the transmit and receive processes. */
   4492 	CSR_WRITE(sc, WMREG_TCTL, 0);
   4493 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4494 	sc->sc_rctl &= ~RCTL_EN;
   4495 
   4496 	/*
   4497 	 * Clear the interrupt mask to ensure the device cannot assert its
   4498 	 * interrupt line.
   4499 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
   4500 	 * any currently pending or shared interrupt.
   4501 	 */
   4502 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4503 	sc->sc_icr = 0;
   4504 
   4505 	/* Release any queued transmit buffers. */
   4506 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   4507 		txs = &sc->sc_txsoft[i];
   4508 		if (txs->txs_mbuf != NULL) {
   4509 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   4510 			m_freem(txs->txs_mbuf);
   4511 			txs->txs_mbuf = NULL;
   4512 		}
   4513 	}
   4514 
   4515 	/* Mark the interface as down and cancel the watchdog timer. */
   4516 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   4517 	ifp->if_timer = 0;
   4518 
   4519 	if (disable)
   4520 		wm_rxdrain(sc);
   4521 
   4522 #if 0 /* notyet */
   4523 	if (sc->sc_type >= WM_T_82544)
   4524 		CSR_WRITE(sc, WMREG_WUC, 0);
   4525 #endif
   4526 }
   4527 
   4528 /*
   4529  * wm_tx_offload:
   4530  *
   4531  *	Set up TCP/IP checksumming parameters for the
   4532  *	specified packet.
   4533  */
   4534 static int
   4535 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   4536     uint8_t *fieldsp)
   4537 {
   4538 	struct mbuf *m0 = txs->txs_mbuf;
   4539 	struct livengood_tcpip_ctxdesc *t;
   4540 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   4541 	uint32_t ipcse;
   4542 	struct ether_header *eh;
   4543 	int offset, iphl;
   4544 	uint8_t fields;
   4545 
   4546 	/*
   4547 	 * XXX It would be nice if the mbuf pkthdr had offset
   4548 	 * fields for the protocol headers.
   4549 	 */
   4550 
   4551 	eh = mtod(m0, struct ether_header *);
   4552 	switch (htons(eh->ether_type)) {
   4553 	case ETHERTYPE_IP:
   4554 	case ETHERTYPE_IPV6:
   4555 		offset = ETHER_HDR_LEN;
   4556 		break;
   4557 
   4558 	case ETHERTYPE_VLAN:
   4559 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4560 		break;
   4561 
   4562 	default:
   4563 		/*
   4564 		 * Don't support this protocol or encapsulation.
   4565 		 */
   4566 		*fieldsp = 0;
   4567 		*cmdp = 0;
   4568 		return 0;
   4569 	}
   4570 
   4571 	if ((m0->m_pkthdr.csum_flags &
   4572 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   4573 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4574 	} else {
   4575 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4576 	}
   4577 	ipcse = offset + iphl - 1;
   4578 
   4579 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   4580 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   4581 	seg = 0;
   4582 	fields = 0;
   4583 
   4584 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4585 		int hlen = offset + iphl;
   4586 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4587 
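         		/*
         		 * Zero the IP length field and seed the TCP checksum
         		 * with a pseudo-header sum computed with a zero length,
         		 * so the hardware can fill in the real values for each
         		 * segment.
         		 */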
   4588 		if (__predict_false(m0->m_len <
   4589 				    (hlen + sizeof(struct tcphdr)))) {
   4590 			/*
   4591 			 * TCP/IP headers are not in the first mbuf; we need
   4592 			 * to do this the slow and painful way.  Let's just
   4593 			 * hope this doesn't happen very often.
   4594 			 */
   4595 			struct tcphdr th;
   4596 
   4597 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4598 
   4599 			m_copydata(m0, hlen, sizeof(th), &th);
   4600 			if (v4) {
   4601 				struct ip ip;
   4602 
   4603 				m_copydata(m0, offset, sizeof(ip), &ip);
   4604 				ip.ip_len = 0;
   4605 				m_copyback(m0,
   4606 				    offset + offsetof(struct ip, ip_len),
   4607 				    sizeof(ip.ip_len), &ip.ip_len);
   4608 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4609 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4610 			} else {
   4611 				struct ip6_hdr ip6;
   4612 
   4613 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4614 				ip6.ip6_plen = 0;
   4615 				m_copyback(m0,
   4616 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4617 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4618 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4619 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4620 			}
   4621 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4622 			    sizeof(th.th_sum), &th.th_sum);
   4623 
   4624 			hlen += th.th_off << 2;
   4625 		} else {
   4626 			/*
   4627 			 * TCP/IP headers are in the first mbuf; we can do
   4628 			 * this the easy way.
   4629 			 */
   4630 			struct tcphdr *th;
   4631 
   4632 			if (v4) {
   4633 				struct ip *ip =
   4634 				    (void *)(mtod(m0, char *) + offset);
   4635 				th = (void *)(mtod(m0, char *) + hlen);
   4636 
   4637 				ip->ip_len = 0;
   4638 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4639 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4640 			} else {
   4641 				struct ip6_hdr *ip6 =
   4642 				    (void *)(mtod(m0, char *) + offset);
   4643 				th = (void *)(mtod(m0, char *) + hlen);
   4644 
   4645 				ip6->ip6_plen = 0;
   4646 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4647 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4648 			}
   4649 			hlen += th->th_off << 2;
   4650 		}
   4651 
   4652 		if (v4) {
   4653 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4654 			cmdlen |= WTX_TCPIP_CMD_IP;
   4655 		} else {
   4656 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4657 			ipcse = 0;
   4658 		}
   4659 		cmd |= WTX_TCPIP_CMD_TSE;
   4660 		cmdlen |= WTX_TCPIP_CMD_TSE |
   4661 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   4662 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   4663 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   4664 	}
   4665 
   4666 	/*
   4667 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   4668 	 * offload feature, if we load the context descriptor, we
   4669 	 * MUST provide valid values for IPCSS and TUCSS fields.
   4670 	 */
   4671 
   4672 	ipcs = WTX_TCPIP_IPCSS(offset) |
   4673 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   4674 	    WTX_TCPIP_IPCSE(ipcse);
   4675 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   4676 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   4677 		fields |= WTX_IXSM;
   4678 	}
   4679 
   4680 	offset += iphl;
   4681 
   4682 	if (m0->m_pkthdr.csum_flags &
   4683 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   4684 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4685 		fields |= WTX_TXSM;
   4686 		tucs = WTX_TCPIP_TUCSS(offset) |
   4687 		    WTX_TCPIP_TUCSO(offset +
   4688 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   4689 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4690 	} else if ((m0->m_pkthdr.csum_flags &
   4691 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   4692 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4693 		fields |= WTX_TXSM;
   4694 		tucs = WTX_TCPIP_TUCSS(offset) |
   4695 		    WTX_TCPIP_TUCSO(offset +
   4696 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   4697 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4698 	} else {
   4699 		/* Just initialize it to a valid TCP context. */
   4700 		tucs = WTX_TCPIP_TUCSS(offset) |
   4701 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   4702 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4703 	}
   4704 
   4705 	/* Fill in the context descriptor. */
   4706 	t = (struct livengood_tcpip_ctxdesc *)
   4707 	    &sc->sc_txdescs[sc->sc_txnext];
   4708 	t->tcpip_ipcs = htole32(ipcs);
   4709 	t->tcpip_tucs = htole32(tucs);
   4710 	t->tcpip_cmdlen = htole32(cmdlen);
   4711 	t->tcpip_seg = htole32(seg);
   4712 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   4713 
   4714 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   4715 	txs->txs_ndesc++;
   4716 
   4717 	*cmdp = cmd;
   4718 	*fieldsp = fields;
   4719 
   4720 	return 0;
   4721 }
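
/*
 * Illustrative aside, not part of the driver: the TSO paths above seed
 * th_sum with a pseudo-header checksum so the hardware only has to add
 * the TCP segment itself.  in_cksum_phdr()/in6_cksum_phdr() build on the
 * standard Internet ones-complement checksum (RFC 1071); a minimal
 * standalone sketch of that arithmetic:
 */
#if 0	/* example only */
static uint16_t
example_in_cksum(const uint16_t *buf, int nwords)
{
	uint32_t sum = 0;

	while (nwords-- > 0)
		sum += *buf++;			/* 16-bit 1's complement sum */
	sum = (sum >> 16) + (sum & 0xffff);	/* fold the carries ... */
	sum += (sum >> 16);			/* ... possibly twice */
	return (uint16_t)~sum;			/* and complement */
}
#endif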
   4722 
   4723 static void
   4724 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   4725 {
   4726 	struct mbuf *m;
   4727 	int i;
   4728 
   4729 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   4730 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   4731 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   4732 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   4733 		    m->m_data, m->m_len, m->m_flags);
   4734 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   4735 	    i, i == 1 ? "" : "s");
   4736 }
   4737 
   4738 /*
   4739  * wm_82547_txfifo_stall:
   4740  *
   4741  *	Callout used to wait for the 82547 Tx FIFO to drain,
   4742  *	reset the FIFO pointers, and restart packet transmission.
   4743  */
   4744 static void
   4745 wm_82547_txfifo_stall(void *arg)
   4746 {
   4747 	struct wm_softc *sc = arg;
   4748 #ifndef WM_MPSAFE
   4749 	int s;
   4750 
   4751 	s = splnet();
   4752 #endif
   4753 	WM_TX_LOCK(sc);
   4754 
   4755 	if (sc->sc_stopping)
   4756 		goto out;
   4757 
   4758 	if (sc->sc_txfifo_stall) {
   4759 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
   4760 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   4761 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   4762 			/*
   4763 			 * Packets have drained.  Stop transmitter, reset
   4764 			 * FIFO pointers, restart transmitter, and kick
   4765 			 * the packet queue.
   4766 			 */
   4767 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   4768 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   4769 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
   4770 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
   4771 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
   4772 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
   4773 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   4774 			CSR_WRITE_FLUSH(sc);
   4775 
   4776 			sc->sc_txfifo_head = 0;
   4777 			sc->sc_txfifo_stall = 0;
   4778 			wm_start_locked(&sc->sc_ethercom.ec_if);
   4779 		} else {
   4780 			/*
   4781 			 * Still waiting for packets to drain; try again in
   4782 			 * another tick.
   4783 			 */
   4784 			callout_schedule(&sc->sc_txfifo_ch, 1);
   4785 		}
   4786 	}
   4787 
   4788 out:
   4789 	WM_TX_UNLOCK(sc);
   4790 #ifndef WM_MPSAFE
   4791 	splx(s);
   4792 #endif
   4793 }
   4794 
   4795 /*
   4796  * wm_82547_txfifo_bugchk:
   4797  *
   4798  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   4799  *	prevent enqueueing a packet that would wrap around the end
    4800 	 *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   4801  *
   4802  *	We do this by checking the amount of space before the end
   4803  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   4804  *	the Tx FIFO, wait for all remaining packets to drain, reset
   4805  *	the internal FIFO pointers to the beginning, and restart
   4806  *	transmission on the interface.
   4807  */
   4808 #define	WM_FIFO_HDR		0x10
   4809 #define	WM_82547_PAD_LEN	0x3e0
   4810 static int
   4811 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   4812 {
   4813 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
   4814 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   4815 
   4816 	/* Just return if already stalled. */
   4817 	if (sc->sc_txfifo_stall)
   4818 		return 1;
   4819 
   4820 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   4821 		/* Stall only occurs in half-duplex mode. */
   4822 		goto send_packet;
   4823 	}
   4824 
   4825 	if (len >= WM_82547_PAD_LEN + space) {
   4826 		sc->sc_txfifo_stall = 1;
   4827 		callout_schedule(&sc->sc_txfifo_ch, 1);
   4828 		return 1;
   4829 	}
   4830 
   4831  send_packet:
   4832 	sc->sc_txfifo_head += len;
   4833 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
   4834 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
   4835 
   4836 	return 0;
   4837 }
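
/*
 * Illustrative aside, not part of the driver: the core of the FIFO
 * accounting above, in isolation.  Round the frame up to a FIFO-header
 * multiple, refuse it if it would land in the dangerous region near the
 * end of the FIFO, and otherwise advance the head modulo the FIFO size.
 * This sketch omits the duplex and already-stalled checks.
 */
#if 0	/* example only */
static int
example_fifo_account(unsigned int *head, unsigned int fifo_size,
    unsigned int pktlen)
{
	unsigned int space = fifo_size - *head;
	unsigned int len = roundup(pktlen + WM_FIFO_HDR, WM_FIFO_HDR);

	if (len >= WM_82547_PAD_LEN + space)
		return 1;		/* caller must stall and drain */

	*head += len;
	if (*head >= fifo_size)
		*head -= fifo_size;	/* wrap around */
	return 0;			/* safe to send */
}
#endif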
   4838 
   4839 /*
   4840  * wm_start:		[ifnet interface function]
   4841  *
   4842  *	Start packet transmission on the interface.
   4843  */
   4844 static void
   4845 wm_start(struct ifnet *ifp)
   4846 {
   4847 	struct wm_softc *sc = ifp->if_softc;
   4848 
   4849 	WM_TX_LOCK(sc);
   4850 	if (!sc->sc_stopping)
   4851 		wm_start_locked(ifp);
   4852 	WM_TX_UNLOCK(sc);
   4853 }
   4854 
   4855 static void
   4856 wm_start_locked(struct ifnet *ifp)
   4857 {
   4858 	struct wm_softc *sc = ifp->if_softc;
   4859 	struct mbuf *m0;
   4860 	struct m_tag *mtag;
   4861 	struct wm_txsoft *txs;
   4862 	bus_dmamap_t dmamap;
   4863 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   4864 	bus_addr_t curaddr;
   4865 	bus_size_t seglen, curlen;
   4866 	uint32_t cksumcmd;
   4867 	uint8_t cksumfields;
   4868 
   4869 	KASSERT(WM_TX_LOCKED(sc));
   4870 
   4871 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   4872 		return;
   4873 
   4874 	/* Remember the previous number of free descriptors. */
   4875 	ofree = sc->sc_txfree;
   4876 
   4877 	/*
   4878 	 * Loop through the send queue, setting up transmit descriptors
   4879 	 * until we drain the queue, or use up all available transmit
   4880 	 * descriptors.
   4881 	 */
   4882 	for (;;) {
   4883 		m0 = NULL;
   4884 
   4885 		/* Get a work queue entry. */
   4886 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   4887 			wm_txintr(sc);
   4888 			if (sc->sc_txsfree == 0) {
   4889 				DPRINTF(WM_DEBUG_TX,
   4890 				    ("%s: TX: no free job descriptors\n",
   4891 					device_xname(sc->sc_dev)));
   4892 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   4893 				break;
   4894 			}
   4895 		}
   4896 
   4897 		/* Grab a packet off the queue. */
   4898 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   4899 		if (m0 == NULL)
   4900 			break;
   4901 
   4902 		DPRINTF(WM_DEBUG_TX,
   4903 		    ("%s: TX: have packet to transmit: %p\n",
   4904 		    device_xname(sc->sc_dev), m0));
   4905 
   4906 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   4907 		dmamap = txs->txs_dmamap;
   4908 
   4909 		use_tso = (m0->m_pkthdr.csum_flags &
   4910 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   4911 
   4912 		/*
   4913 		 * So says the Linux driver:
   4914 		 * The controller does a simple calculation to make sure
   4915 		 * there is enough room in the FIFO before initiating the
   4916 		 * DMA for each buffer.  The calc is:
   4917 		 *	4 = ceil(buffer len / MSS)
   4918 		 * To make sure we don't overrun the FIFO, adjust the max
   4919 		 * buffer len if the MSS drops.
   4920 		 */
   4921 		dmamap->dm_maxsegsz =
   4922 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   4923 		    ? m0->m_pkthdr.segsz << 2
   4924 		    : WTX_MAX_LEN;
   4925 
   4926 		/*
   4927 		 * Load the DMA map.  If this fails, the packet either
   4928 		 * didn't fit in the allotted number of segments, or we
   4929 		 * were short on resources.  For the too-many-segments
   4930 		 * case, we simply report an error and drop the packet,
   4931 		 * since we can't sanely copy a jumbo packet to a single
   4932 		 * buffer.
   4933 		 */
   4934 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   4935 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   4936 		if (error) {
   4937 			if (error == EFBIG) {
   4938 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   4939 				log(LOG_ERR, "%s: Tx packet consumes too many "
   4940 				    "DMA segments, dropping...\n",
   4941 				    device_xname(sc->sc_dev));
   4942 				wm_dump_mbuf_chain(sc, m0);
   4943 				m_freem(m0);
   4944 				continue;
   4945 			}
    4946 			/* Short on resources, just stop for now. */
   4947 			DPRINTF(WM_DEBUG_TX,
   4948 			    ("%s: TX: dmamap load failed: %d\n",
   4949 			    device_xname(sc->sc_dev), error));
   4950 			break;
   4951 		}
   4952 
   4953 		segs_needed = dmamap->dm_nsegs;
   4954 		if (use_tso) {
   4955 			/* For sentinel descriptor; see below. */
   4956 			segs_needed++;
   4957 		}
   4958 
   4959 		/*
   4960 		 * Ensure we have enough descriptors free to describe
   4961 		 * the packet.  Note, we always reserve one descriptor
   4962 		 * at the end of the ring due to the semantics of the
   4963 		 * TDT register, plus one more in the event we need
   4964 		 * to load offload context.
   4965 		 */
   4966 		if (segs_needed > sc->sc_txfree - 2) {
   4967 			/*
   4968 			 * Not enough free descriptors to transmit this
   4969 			 * packet.  We haven't committed anything yet,
   4970 			 * so just unload the DMA map, put the packet
    4971 			 * back on the queue, and punt.  Notify the upper
   4972 			 * layer that there are no more slots left.
   4973 			 */
   4974 			DPRINTF(WM_DEBUG_TX,
   4975 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   4976 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   4977 			    segs_needed, sc->sc_txfree - 1));
   4978 			ifp->if_flags |= IFF_OACTIVE;
   4979 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4980 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   4981 			break;
   4982 		}
   4983 
   4984 		/*
   4985 		 * Check for 82547 Tx FIFO bug.  We need to do this
   4986 		 * once we know we can transmit the packet, since we
   4987 		 * do some internal FIFO space accounting here.
   4988 		 */
   4989 		if (sc->sc_type == WM_T_82547 &&
   4990 		    wm_82547_txfifo_bugchk(sc, m0)) {
   4991 			DPRINTF(WM_DEBUG_TX,
   4992 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   4993 			    device_xname(sc->sc_dev)));
   4994 			ifp->if_flags |= IFF_OACTIVE;
   4995 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4996 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   4997 			break;
   4998 		}
   4999 
   5000 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   5001 
   5002 		DPRINTF(WM_DEBUG_TX,
   5003 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   5004 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   5005 
   5006 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   5007 
   5008 		/*
   5009 		 * Store a pointer to the packet so that we can free it
   5010 		 * later.
   5011 		 *
   5012 		 * Initially, we consider the number of descriptors the
    5013 		 * packet uses to be the number of DMA segments.  This may be
   5014 		 * incremented by 1 if we do checksum offload (a descriptor
   5015 		 * is used to set the checksum context).
   5016 		 */
   5017 		txs->txs_mbuf = m0;
   5018 		txs->txs_firstdesc = sc->sc_txnext;
   5019 		txs->txs_ndesc = segs_needed;
   5020 
   5021 		/* Set up offload parameters for this packet. */
   5022 		if (m0->m_pkthdr.csum_flags &
   5023 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   5024 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   5025 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   5026 			if (wm_tx_offload(sc, txs, &cksumcmd,
   5027 					  &cksumfields) != 0) {
   5028 				/* Error message already displayed. */
   5029 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   5030 				continue;
   5031 			}
   5032 		} else {
   5033 			cksumcmd = 0;
   5034 			cksumfields = 0;
   5035 		}
   5036 
   5037 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   5038 
   5039 		/* Sync the DMA map. */
   5040 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   5041 		    BUS_DMASYNC_PREWRITE);
   5042 
   5043 		/* Initialize the transmit descriptor. */
   5044 		for (nexttx = sc->sc_txnext, seg = 0;
   5045 		     seg < dmamap->dm_nsegs; seg++) {
   5046 			for (seglen = dmamap->dm_segs[seg].ds_len,
   5047 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   5048 			     seglen != 0;
   5049 			     curaddr += curlen, seglen -= curlen,
   5050 			     nexttx = WM_NEXTTX(sc, nexttx)) {
   5051 				curlen = seglen;
   5052 
   5053 				/*
   5054 				 * So says the Linux driver:
   5055 				 * Work around for premature descriptor
   5056 				 * write-backs in TSO mode.  Append a
   5057 				 * 4-byte sentinel descriptor.
   5058 				 */
   5059 				if (use_tso &&
   5060 				    seg == dmamap->dm_nsegs - 1 &&
   5061 				    curlen > 8)
   5062 					curlen -= 4;
   5063 
   5064 				wm_set_dma_addr(
   5065 				    &sc->sc_txdescs[nexttx].wtx_addr,
   5066 				    curaddr);
   5067 				sc->sc_txdescs[nexttx].wtx_cmdlen =
   5068 				    htole32(cksumcmd | curlen);
   5069 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
   5070 				    0;
   5071 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
   5072 				    cksumfields;
   5073 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   5074 				lasttx = nexttx;
   5075 
   5076 				DPRINTF(WM_DEBUG_TX,
   5077 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   5078 				     "len %#04zx\n",
   5079 				    device_xname(sc->sc_dev), nexttx,
   5080 				    (uint64_t)curaddr, curlen));
   5081 			}
   5082 		}
   5083 
   5084 		KASSERT(lasttx != -1);
   5085 
   5086 		/*
   5087 		 * Set up the command byte on the last descriptor of
   5088 		 * the packet.  If we're in the interrupt delay window,
   5089 		 * delay the interrupt.
   5090 		 */
   5091 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   5092 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   5093 
   5094 		/*
   5095 		 * If VLANs are enabled and the packet has a VLAN tag, set
   5096 		 * up the descriptor to encapsulate the packet for us.
   5097 		 *
   5098 		 * This is only valid on the last descriptor of the packet.
   5099 		 */
   5100 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   5101 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   5102 			    htole32(WTX_CMD_VLE);
   5103 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
   5104 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   5105 		}
   5106 
   5107 		txs->txs_lastdesc = lasttx;
   5108 
   5109 		DPRINTF(WM_DEBUG_TX,
   5110 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   5111 		    device_xname(sc->sc_dev),
   5112 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   5113 
   5114 		/* Sync the descriptors we're using. */
   5115 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   5116 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5117 
   5118 		/* Give the packet to the chip. */
   5119 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   5120 
   5121 		DPRINTF(WM_DEBUG_TX,
   5122 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   5123 
   5124 		DPRINTF(WM_DEBUG_TX,
   5125 		    ("%s: TX: finished transmitting packet, job %d\n",
   5126 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   5127 
   5128 		/* Advance the tx pointer. */
   5129 		sc->sc_txfree -= txs->txs_ndesc;
   5130 		sc->sc_txnext = nexttx;
   5131 
   5132 		sc->sc_txsfree--;
   5133 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   5134 
   5135 		/* Pass the packet to any BPF listeners. */
   5136 		bpf_mtap(ifp, m0);
   5137 	}
   5138 
   5139 	if (m0 != NULL) {
   5140 		ifp->if_flags |= IFF_OACTIVE;
   5141 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5142 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   5143 		m_freem(m0);
   5144 	}
   5145 
   5146 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   5147 		/* No more slots; notify upper layer. */
   5148 		ifp->if_flags |= IFF_OACTIVE;
   5149 	}
   5150 
   5151 	if (sc->sc_txfree != ofree) {
   5152 		/* Set a watchdog timer in case the chip flakes out. */
   5153 		ifp->if_timer = 5;
   5154 	}
   5155 }
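
/*
 * Illustrative aside, not part of the driver: the TSO segment-size clamp
 * applied in wm_start_locked() above, in isolation.  The controller
 * assumes any one DMA buffer covers at most four MSS worth of payload
 * (4 = ceil(buffer len / MSS)), so each map segment is capped at 4 * MSS,
 * or at the hardware per-descriptor maximum, whichever is smaller.
 */
#if 0	/* example only */
static bus_size_t
example_tso_maxsegsz(unsigned int mss, bool use_tso)
{

	if (use_tso && (mss << 2) < WTX_MAX_LEN)
		return mss << 2;	/* 4 * MSS keeps ceil(len/MSS) <= 4 */
	return WTX_MAX_LEN;		/* otherwise the per-descriptor max */
}
#endif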
   5156 
   5157 /*
   5158  * wm_nq_tx_offload:
   5159  *
   5160  *	Set up TCP/IP checksumming parameters for the
   5161  *	specified packet, for NEWQUEUE devices
   5162  */
   5163 static int
   5164 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   5165     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   5166 {
   5167 	struct mbuf *m0 = txs->txs_mbuf;
   5168 	struct m_tag *mtag;
   5169 	uint32_t vl_len, mssidx, cmdc;
   5170 	struct ether_header *eh;
   5171 	int offset, iphl;
   5172 
   5173 	/*
   5174 	 * XXX It would be nice if the mbuf pkthdr had offset
   5175 	 * fields for the protocol headers.
   5176 	 */
   5177 	*cmdlenp = 0;
   5178 	*fieldsp = 0;
   5179 
   5180 	eh = mtod(m0, struct ether_header *);
   5181 	switch (htons(eh->ether_type)) {
   5182 	case ETHERTYPE_IP:
   5183 	case ETHERTYPE_IPV6:
   5184 		offset = ETHER_HDR_LEN;
   5185 		break;
   5186 
   5187 	case ETHERTYPE_VLAN:
   5188 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5189 		break;
   5190 
   5191 	default:
   5192 		/* Don't support this protocol or encapsulation. */
   5193 		*do_csum = false;
   5194 		return 0;
   5195 	}
   5196 	*do_csum = true;
   5197 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   5198 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   5199 
   5200 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   5201 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   5202 
   5203 	if ((m0->m_pkthdr.csum_flags &
   5204 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   5205 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5206 	} else {
   5207 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5208 	}
   5209 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   5210 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   5211 
   5212 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   5213 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   5214 		     << NQTXC_VLLEN_VLAN_SHIFT);
   5215 		*cmdlenp |= NQTX_CMD_VLE;
   5216 	}
   5217 
   5218 	mssidx = 0;
   5219 
   5220 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5221 		int hlen = offset + iphl;
   5222 		int tcp_hlen;
   5223 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5224 
   5225 		if (__predict_false(m0->m_len <
   5226 				    (hlen + sizeof(struct tcphdr)))) {
   5227 			/*
   5228 			 * TCP/IP headers are not in the first mbuf; we need
   5229 			 * to do this the slow and painful way.  Let's just
   5230 			 * hope this doesn't happen very often.
   5231 			 */
   5232 			struct tcphdr th;
   5233 
   5234 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   5235 
   5236 			m_copydata(m0, hlen, sizeof(th), &th);
   5237 			if (v4) {
   5238 				struct ip ip;
   5239 
   5240 				m_copydata(m0, offset, sizeof(ip), &ip);
   5241 				ip.ip_len = 0;
   5242 				m_copyback(m0,
   5243 				    offset + offsetof(struct ip, ip_len),
   5244 				    sizeof(ip.ip_len), &ip.ip_len);
   5245 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5246 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5247 			} else {
   5248 				struct ip6_hdr ip6;
   5249 
   5250 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   5251 				ip6.ip6_plen = 0;
   5252 				m_copyback(m0,
   5253 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   5254 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   5255 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   5256 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   5257 			}
   5258 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   5259 			    sizeof(th.th_sum), &th.th_sum);
   5260 
   5261 			tcp_hlen = th.th_off << 2;
   5262 		} else {
   5263 			/*
   5264 			 * TCP/IP headers are in the first mbuf; we can do
   5265 			 * this the easy way.
   5266 			 */
   5267 			struct tcphdr *th;
   5268 
   5269 			if (v4) {
   5270 				struct ip *ip =
   5271 				    (void *)(mtod(m0, char *) + offset);
   5272 				th = (void *)(mtod(m0, char *) + hlen);
   5273 
   5274 				ip->ip_len = 0;
   5275 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   5276 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   5277 			} else {
   5278 				struct ip6_hdr *ip6 =
   5279 				    (void *)(mtod(m0, char *) + offset);
   5280 				th = (void *)(mtod(m0, char *) + hlen);
   5281 
   5282 				ip6->ip6_plen = 0;
   5283 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   5284 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   5285 			}
   5286 			tcp_hlen = th->th_off << 2;
   5287 		}
   5288 		hlen += tcp_hlen;
   5289 		*cmdlenp |= NQTX_CMD_TSE;
   5290 
   5291 		if (v4) {
   5292 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   5293 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   5294 		} else {
   5295 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   5296 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   5297 		}
   5298 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   5299 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   5300 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   5301 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   5302 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   5303 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   5304 	} else {
   5305 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   5306 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   5307 	}
   5308 
   5309 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   5310 		*fieldsp |= NQTXD_FIELDS_IXSM;
   5311 		cmdc |= NQTXC_CMD_IP4;
   5312 	}
   5313 
   5314 	if (m0->m_pkthdr.csum_flags &
   5315 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   5316 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   5317 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   5318 			cmdc |= NQTXC_CMD_TCP;
   5319 		} else {
   5320 			cmdc |= NQTXC_CMD_UDP;
   5321 		}
   5322 		cmdc |= NQTXC_CMD_IP4;
   5323 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   5324 	}
   5325 	if (m0->m_pkthdr.csum_flags &
   5326 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   5327 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   5328 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   5329 			cmdc |= NQTXC_CMD_TCP;
   5330 		} else {
   5331 			cmdc |= NQTXC_CMD_UDP;
   5332 		}
   5333 		cmdc |= NQTXC_CMD_IP6;
   5334 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   5335 	}
   5336 
   5337 	/* Fill in the context descriptor. */
   5338 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
   5339 	    htole32(vl_len);
   5340 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
   5341 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
   5342 	    htole32(cmdc);
   5343 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
   5344 	    htole32(mssidx);
   5345 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   5346 	DPRINTF(WM_DEBUG_TX,
   5347 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   5348 	    sc->sc_txnext, 0, vl_len));
   5349 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   5350 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   5351 	txs->txs_ndesc++;
   5352 	return 0;
   5353 }
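
/*
 * Illustrative aside, not part of the driver: the shift-and-mask field
 * packing used for the context descriptor words above (VLLEN, MSSIDX and
 * friends), with hypothetical mask and shift values.  Each value must
 * fit its mask before shifting, which is what the KASSERTs in
 * wm_nq_tx_offload() verify.
 */
#if 0	/* example only */
#define	EX_FIELD_MASK	0x7f	/* hypothetical 7-bit field */
#define	EX_FIELD_SHIFT	9	/* hypothetical bit position */

static uint32_t
example_pack_field(uint32_t word, uint32_t val)
{

	KASSERT((val & ~EX_FIELD_MASK) == 0);	/* value must fit the field */
	return word | (val << EX_FIELD_SHIFT);
}
#endif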
   5354 
   5355 /*
   5356  * wm_nq_start:		[ifnet interface function]
   5357  *
   5358  *	Start packet transmission on the interface for NEWQUEUE devices
   5359  */
   5360 static void
   5361 wm_nq_start(struct ifnet *ifp)
   5362 {
   5363 	struct wm_softc *sc = ifp->if_softc;
   5364 
   5365 	WM_TX_LOCK(sc);
   5366 	if (!sc->sc_stopping)
   5367 		wm_nq_start_locked(ifp);
   5368 	WM_TX_UNLOCK(sc);
   5369 }
   5370 
   5371 static void
   5372 wm_nq_start_locked(struct ifnet *ifp)
   5373 {
   5374 	struct wm_softc *sc = ifp->if_softc;
   5375 	struct mbuf *m0;
   5376 	struct m_tag *mtag;
   5377 	struct wm_txsoft *txs;
   5378 	bus_dmamap_t dmamap;
   5379 	int error, nexttx, lasttx = -1, seg, segs_needed;
   5380 	bool do_csum, sent;
   5381 
   5382 	KASSERT(WM_TX_LOCKED(sc));
   5383 
   5384 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   5385 		return;
   5386 
   5387 	sent = false;
   5388 
   5389 	/*
   5390 	 * Loop through the send queue, setting up transmit descriptors
   5391 	 * until we drain the queue, or use up all available transmit
   5392 	 * descriptors.
   5393 	 */
   5394 	for (;;) {
   5395 		m0 = NULL;
   5396 
   5397 		/* Get a work queue entry. */
   5398 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   5399 			wm_txintr(sc);
   5400 			if (sc->sc_txsfree == 0) {
   5401 				DPRINTF(WM_DEBUG_TX,
   5402 				    ("%s: TX: no free job descriptors\n",
   5403 					device_xname(sc->sc_dev)));
   5404 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   5405 				break;
   5406 			}
   5407 		}
   5408 
   5409 		/* Grab a packet off the queue. */
   5410 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   5411 		if (m0 == NULL)
   5412 			break;
   5413 
   5414 		DPRINTF(WM_DEBUG_TX,
   5415 		    ("%s: TX: have packet to transmit: %p\n",
   5416 		    device_xname(sc->sc_dev), m0));
   5417 
   5418 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   5419 		dmamap = txs->txs_dmamap;
   5420 
   5421 		/*
   5422 		 * Load the DMA map.  If this fails, the packet either
   5423 		 * didn't fit in the allotted number of segments, or we
   5424 		 * were short on resources.  For the too-many-segments
   5425 		 * case, we simply report an error and drop the packet,
   5426 		 * since we can't sanely copy a jumbo packet to a single
   5427 		 * buffer.
   5428 		 */
   5429 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   5430 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   5431 		if (error) {
   5432 			if (error == EFBIG) {
   5433 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5434 				log(LOG_ERR, "%s: Tx packet consumes too many "
   5435 				    "DMA segments, dropping...\n",
   5436 				    device_xname(sc->sc_dev));
   5437 				wm_dump_mbuf_chain(sc, m0);
   5438 				m_freem(m0);
   5439 				continue;
   5440 			}
   5441 			/* Short on resources, just stop for now. */
   5442 			DPRINTF(WM_DEBUG_TX,
   5443 			    ("%s: TX: dmamap load failed: %d\n",
   5444 			    device_xname(sc->sc_dev), error));
   5445 			break;
   5446 		}
   5447 
   5448 		segs_needed = dmamap->dm_nsegs;
   5449 
   5450 		/*
   5451 		 * Ensure we have enough descriptors free to describe
   5452 		 * the packet.  Note, we always reserve one descriptor
   5453 		 * at the end of the ring due to the semantics of the
   5454 		 * TDT register, plus one more in the event we need
   5455 		 * to load offload context.
   5456 		 */
   5457 		if (segs_needed > sc->sc_txfree - 2) {
   5458 			/*
   5459 			 * Not enough free descriptors to transmit this
   5460 			 * packet.  We haven't committed anything yet,
   5461 			 * so just unload the DMA map, put the packet
    5462 			 * back on the queue, and punt.  Notify the upper
   5463 			 * layer that there are no more slots left.
   5464 			 */
   5465 			DPRINTF(WM_DEBUG_TX,
   5466 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   5467 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   5468 			    segs_needed, sc->sc_txfree - 1));
   5469 			ifp->if_flags |= IFF_OACTIVE;
   5470 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   5471 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   5472 			break;
   5473 		}
   5474 
   5475 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   5476 
   5477 		DPRINTF(WM_DEBUG_TX,
   5478 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   5479 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   5480 
   5481 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   5482 
   5483 		/*
   5484 		 * Store a pointer to the packet so that we can free it
   5485 		 * later.
   5486 		 *
   5487 		 * Initially, we consider the number of descriptors the
    5488 		 * packet uses to be the number of DMA segments.  This may be
   5489 		 * incremented by 1 if we do checksum offload (a descriptor
   5490 		 * is used to set the checksum context).
   5491 		 */
   5492 		txs->txs_mbuf = m0;
   5493 		txs->txs_firstdesc = sc->sc_txnext;
   5494 		txs->txs_ndesc = segs_needed;
   5495 
   5496 		/* Set up offload parameters for this packet. */
   5497 		uint32_t cmdlen, fields, dcmdlen;
   5498 		if (m0->m_pkthdr.csum_flags &
   5499 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   5500 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   5501 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   5502 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   5503 			    &do_csum) != 0) {
   5504 				/* Error message already displayed. */
   5505 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   5506 				continue;
   5507 			}
   5508 		} else {
   5509 			do_csum = false;
   5510 			cmdlen = 0;
   5511 			fields = 0;
   5512 		}
   5513 
   5514 		/* Sync the DMA map. */
   5515 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   5516 		    BUS_DMASYNC_PREWRITE);
   5517 
   5518 		/* Initialize the first transmit descriptor. */
   5519 		nexttx = sc->sc_txnext;
   5520 		if (!do_csum) {
    5521 			/* Set up a legacy descriptor. */
   5522 			wm_set_dma_addr(
   5523 			    &sc->sc_txdescs[nexttx].wtx_addr,
   5524 			    dmamap->dm_segs[0].ds_addr);
   5525 			sc->sc_txdescs[nexttx].wtx_cmdlen =
   5526 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   5527 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
   5528 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
   5529 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   5530 			    NULL) {
   5531 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
   5532 				    htole32(WTX_CMD_VLE);
   5533 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
   5534 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   5535 			} else {
   5536 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   5537 			}
   5538 			dcmdlen = 0;
   5539 		} else {
    5540 			/* Set up an advanced data descriptor. */
   5541 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5542 			    htole64(dmamap->dm_segs[0].ds_addr);
   5543 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   5544 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
    5545 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   5546 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
   5547 			    htole32(fields);
   5548 			DPRINTF(WM_DEBUG_TX,
   5549 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   5550 			    device_xname(sc->sc_dev), nexttx,
   5551 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   5552 			DPRINTF(WM_DEBUG_TX,
   5553 			    ("\t 0x%08x%08x\n", fields,
   5554 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   5555 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   5556 		}
   5557 
   5558 		lasttx = nexttx;
   5559 		nexttx = WM_NEXTTX(sc, nexttx);
   5560 		/*
    5561 		 * Fill in the next descriptors.  The legacy and advanced
    5562 		 * formats are the same from here on.
   5563 		 */
   5564 		for (seg = 1; seg < dmamap->dm_nsegs;
   5565 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
   5566 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5567 			    htole64(dmamap->dm_segs[seg].ds_addr);
   5568 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
   5569 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   5570 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   5571 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
   5572 			lasttx = nexttx;
   5573 
   5574 			DPRINTF(WM_DEBUG_TX,
   5575 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   5576 			     "len %#04zx\n",
   5577 			    device_xname(sc->sc_dev), nexttx,
   5578 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   5579 			    dmamap->dm_segs[seg].ds_len));
   5580 		}
   5581 
   5582 		KASSERT(lasttx != -1);
   5583 
   5584 		/*
   5585 		 * Set up the command byte on the last descriptor of
   5586 		 * the packet.  If we're in the interrupt delay window,
   5587 		 * delay the interrupt.
   5588 		 */
   5589 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   5590 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   5591 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   5592 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   5593 
   5594 		txs->txs_lastdesc = lasttx;
   5595 
   5596 		DPRINTF(WM_DEBUG_TX,
   5597 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   5598 		    device_xname(sc->sc_dev),
   5599 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   5600 
   5601 		/* Sync the descriptors we're using. */
   5602 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   5603 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5604 
   5605 		/* Give the packet to the chip. */
   5606 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   5607 		sent = true;
   5608 
   5609 		DPRINTF(WM_DEBUG_TX,
   5610 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   5611 
   5612 		DPRINTF(WM_DEBUG_TX,
   5613 		    ("%s: TX: finished transmitting packet, job %d\n",
   5614 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   5615 
   5616 		/* Advance the tx pointer. */
   5617 		sc->sc_txfree -= txs->txs_ndesc;
   5618 		sc->sc_txnext = nexttx;
   5619 
   5620 		sc->sc_txsfree--;
   5621 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   5622 
   5623 		/* Pass the packet to any BPF listeners. */
   5624 		bpf_mtap(ifp, m0);
   5625 	}
   5626 
   5627 	if (m0 != NULL) {
   5628 		ifp->if_flags |= IFF_OACTIVE;
   5629 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5630 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   5631 		m_freem(m0);
   5632 	}
   5633 
   5634 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   5635 		/* No more slots; notify upper layer. */
   5636 		ifp->if_flags |= IFF_OACTIVE;
   5637 	}
   5638 
   5639 	if (sent) {
   5640 		/* Set a watchdog timer in case the chip flakes out. */
   5641 		ifp->if_timer = 5;
   5642 	}
   5643 }
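
/*
 * Illustrative aside, not part of the driver: the free-descriptor
 * reservation used by both start routines, in isolation.  Of the free
 * ring slots, one is always held back because of the TDT register
 * semantics and one more for a possible offload context descriptor,
 * so a packet fits only if its segments fit in (free - 2) slots.
 */
#if 0	/* example only */
static bool
example_enough_descs(int segs_needed, int txfree)
{

	return segs_needed <= txfree - 2;	/* 2 slots stay reserved */
}
#endif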
   5644 
   5645 /* Interrupt */
   5646 
   5647 /*
   5648  * wm_txintr:
   5649  *
   5650  *	Helper; handle transmit interrupts.
   5651  */
   5652 static void
   5653 wm_txintr(struct wm_softc *sc)
   5654 {
   5655 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5656 	struct wm_txsoft *txs;
   5657 	uint8_t status;
   5658 	int i;
   5659 
   5660 	if (sc->sc_stopping)
   5661 		return;
   5662 
   5663 	ifp->if_flags &= ~IFF_OACTIVE;
   5664 
   5665 	/*
   5666 	 * Go through the Tx list and free mbufs for those
   5667 	 * frames which have been transmitted.
   5668 	 */
   5669 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
   5670 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
   5671 		txs = &sc->sc_txsoft[i];
   5672 
   5673 		DPRINTF(WM_DEBUG_TX,
   5674 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   5675 
   5676 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
   5677 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5678 
   5679 		status =
   5680 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   5681 		if ((status & WTX_ST_DD) == 0) {
   5682 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
   5683 			    BUS_DMASYNC_PREREAD);
   5684 			break;
   5685 		}
   5686 
   5687 		DPRINTF(WM_DEBUG_TX,
   5688 		    ("%s: TX: job %d done: descs %d..%d\n",
   5689 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   5690 		    txs->txs_lastdesc));
   5691 
   5692 		/*
   5693 		 * XXX We should probably be using the statistics
   5694 		 * XXX registers, but I don't know if they exist
   5695 		 * XXX on chips before the i82544.
   5696 		 */
   5697 
   5698 #ifdef WM_EVENT_COUNTERS
   5699 		if (status & WTX_ST_TU)
   5700 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   5701 #endif /* WM_EVENT_COUNTERS */
   5702 
   5703 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   5704 			ifp->if_oerrors++;
   5705 			if (status & WTX_ST_LC)
   5706 				log(LOG_WARNING, "%s: late collision\n",
   5707 				    device_xname(sc->sc_dev));
   5708 			else if (status & WTX_ST_EC) {
   5709 				ifp->if_collisions += 16;
   5710 				log(LOG_WARNING, "%s: excessive collisions\n",
   5711 				    device_xname(sc->sc_dev));
   5712 			}
   5713 		} else
   5714 			ifp->if_opackets++;
   5715 
   5716 		sc->sc_txfree += txs->txs_ndesc;
   5717 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   5718 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   5719 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5720 		m_freem(txs->txs_mbuf);
   5721 		txs->txs_mbuf = NULL;
   5722 	}
   5723 
   5724 	/* Update the dirty transmit buffer pointer. */
   5725 	sc->sc_txsdirty = i;
   5726 	DPRINTF(WM_DEBUG_TX,
   5727 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   5728 
   5729 	/*
   5730 	 * If there are no more pending transmissions, cancel the watchdog
   5731 	 * timer.
   5732 	 */
   5733 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
   5734 		ifp->if_timer = 0;
   5735 }
   5736 
   5737 /*
   5738  * wm_rxintr:
   5739  *
   5740  *	Helper; handle receive interrupts.
   5741  */
   5742 static void
   5743 wm_rxintr(struct wm_softc *sc)
   5744 {
   5745 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5746 	struct wm_rxsoft *rxs;
   5747 	struct mbuf *m;
   5748 	int i, len;
   5749 	uint8_t status, errors;
   5750 	uint16_t vlantag;
   5751 
   5752 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
   5753 		rxs = &sc->sc_rxsoft[i];
   5754 
   5755 		DPRINTF(WM_DEBUG_RX,
   5756 		    ("%s: RX: checking descriptor %d\n",
   5757 		    device_xname(sc->sc_dev), i));
   5758 
   5759 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5760 
   5761 		status = sc->sc_rxdescs[i].wrx_status;
   5762 		errors = sc->sc_rxdescs[i].wrx_errors;
   5763 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
   5764 		vlantag = sc->sc_rxdescs[i].wrx_special;
   5765 
   5766 		if ((status & WRX_ST_DD) == 0) {
   5767 			/* We have processed all of the receive descriptors. */
   5768 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
   5769 			break;
   5770 		}
   5771 
   5772 		if (__predict_false(sc->sc_rxdiscard)) {
   5773 			DPRINTF(WM_DEBUG_RX,
   5774 			    ("%s: RX: discarding contents of descriptor %d\n",
   5775 			    device_xname(sc->sc_dev), i));
   5776 			WM_INIT_RXDESC(sc, i);
   5777 			if (status & WRX_ST_EOP) {
   5778 				/* Reset our state. */
   5779 				DPRINTF(WM_DEBUG_RX,
   5780 				    ("%s: RX: resetting rxdiscard -> 0\n",
   5781 				    device_xname(sc->sc_dev)));
   5782 				sc->sc_rxdiscard = 0;
   5783 			}
   5784 			continue;
   5785 		}
   5786 
   5787 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5788 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   5789 
   5790 		m = rxs->rxs_mbuf;
   5791 
   5792 		/*
   5793 		 * Add a new receive buffer to the ring, unless of
   5794 		 * course the length is zero. Treat the latter as a
   5795 		 * failed mapping.
   5796 		 */
   5797 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
   5798 			/*
   5799 			 * Failed, throw away what we've done so
   5800 			 * far, and discard the rest of the packet.
   5801 			 */
   5802 			ifp->if_ierrors++;
   5803 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5804 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5805 			WM_INIT_RXDESC(sc, i);
   5806 			if ((status & WRX_ST_EOP) == 0)
   5807 				sc->sc_rxdiscard = 1;
   5808 			if (sc->sc_rxhead != NULL)
   5809 				m_freem(sc->sc_rxhead);
   5810 			WM_RXCHAIN_RESET(sc);
   5811 			DPRINTF(WM_DEBUG_RX,
   5812 			    ("%s: RX: Rx buffer allocation failed, "
   5813 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   5814 			    sc->sc_rxdiscard ? " (discard)" : ""));
   5815 			continue;
   5816 		}
   5817 
   5818 		m->m_len = len;
   5819 		sc->sc_rxlen += len;
   5820 		DPRINTF(WM_DEBUG_RX,
   5821 		    ("%s: RX: buffer at %p len %d\n",
   5822 		    device_xname(sc->sc_dev), m->m_data, len));
   5823 
   5824 		/* If this is not the end of the packet, keep looking. */
   5825 		if ((status & WRX_ST_EOP) == 0) {
   5826 			WM_RXCHAIN_LINK(sc, m);
   5827 			DPRINTF(WM_DEBUG_RX,
   5828 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   5829 			    device_xname(sc->sc_dev), sc->sc_rxlen));
   5830 			continue;
   5831 		}
   5832 
   5833 		/*
    5834 		 * Okay, we have the entire packet now.  The chip is
    5835 		 * configured to include the FCS except on the I350, I354
    5836 		 * and I21[01] (not all chips can be configured to strip
    5837 		 * it), so we need to trim it.  We may also need to adjust
    5838 		 * the length of the previous mbuf in the chain if the
    5839 		 * current mbuf is too short.
    5840 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    5841 		 * register is always set on the I350, so we don't trim there.
   5842 		 */
   5843 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   5844 		    && (sc->sc_type != WM_T_I210)
   5845 		    && (sc->sc_type != WM_T_I211)) {
   5846 			if (m->m_len < ETHER_CRC_LEN) {
   5847 				sc->sc_rxtail->m_len
   5848 				    -= (ETHER_CRC_LEN - m->m_len);
   5849 				m->m_len = 0;
   5850 			} else
   5851 				m->m_len -= ETHER_CRC_LEN;
   5852 			len = sc->sc_rxlen - ETHER_CRC_LEN;
   5853 		} else
   5854 			len = sc->sc_rxlen;
   5855 
   5856 		WM_RXCHAIN_LINK(sc, m);
   5857 
   5858 		*sc->sc_rxtailp = NULL;
   5859 		m = sc->sc_rxhead;
   5860 
   5861 		WM_RXCHAIN_RESET(sc);
   5862 
   5863 		DPRINTF(WM_DEBUG_RX,
   5864 		    ("%s: RX: have entire packet, len -> %d\n",
   5865 		    device_xname(sc->sc_dev), len));
   5866 
   5867 		/* If an error occurred, update stats and drop the packet. */
   5868 		if (errors &
   5869 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   5870 			if (errors & WRX_ER_SE)
   5871 				log(LOG_WARNING, "%s: symbol error\n",
   5872 				    device_xname(sc->sc_dev));
   5873 			else if (errors & WRX_ER_SEQ)
   5874 				log(LOG_WARNING, "%s: receive sequence error\n",
   5875 				    device_xname(sc->sc_dev));
   5876 			else if (errors & WRX_ER_CE)
   5877 				log(LOG_WARNING, "%s: CRC error\n",
   5878 				    device_xname(sc->sc_dev));
   5879 			m_freem(m);
   5880 			continue;
   5881 		}
   5882 
   5883 		/* No errors.  Receive the packet. */
   5884 		m->m_pkthdr.rcvif = ifp;
   5885 		m->m_pkthdr.len = len;
   5886 
   5887 		/*
   5888 		 * If VLANs are enabled, VLAN packets have been unwrapped
   5889 		 * for us.  Associate the tag with the packet.
   5890 		 */
   5891 		/* XXXX should check for i350 and i354 */
   5892 		if ((status & WRX_ST_VP) != 0) {
   5893 			VLAN_INPUT_TAG(ifp, m,
   5894 			    le16toh(vlantag),
   5895 			    continue);
   5896 		}
   5897 
   5898 		/* Set up checksum info for this packet. */
   5899 		if ((status & WRX_ST_IXSM) == 0) {
   5900 			if (status & WRX_ST_IPCS) {
   5901 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   5902 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   5903 				if (errors & WRX_ER_IPE)
   5904 					m->m_pkthdr.csum_flags |=
   5905 					    M_CSUM_IPv4_BAD;
   5906 			}
   5907 			if (status & WRX_ST_TCPCS) {
   5908 				/*
   5909 				 * Note: we don't know if this was TCP or UDP,
   5910 				 * so we just set both bits, and expect the
   5911 				 * upper layers to deal.
   5912 				 */
   5913 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   5914 				m->m_pkthdr.csum_flags |=
   5915 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   5916 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   5917 				if (errors & WRX_ER_TCPE)
   5918 					m->m_pkthdr.csum_flags |=
   5919 					    M_CSUM_TCP_UDP_BAD;
   5920 			}
   5921 		}
   5922 
   5923 		ifp->if_ipackets++;
   5924 
   5925 		WM_RX_UNLOCK(sc);
   5926 
   5927 		/* Pass this up to any BPF listeners. */
   5928 		bpf_mtap(ifp, m);
   5929 
   5930 		/* Pass it on. */
   5931 		(*ifp->if_input)(ifp, m);
   5932 
   5933 		WM_RX_LOCK(sc);
   5934 
   5935 		if (sc->sc_stopping)
   5936 			break;
   5937 	}
   5938 
   5939 	/* Update the receive pointer. */
   5940 	sc->sc_rxptr = i;
   5941 
   5942 	DPRINTF(WM_DEBUG_RX,
   5943 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   5944 }
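
/*
 * Illustrative aside, not part of the driver: the FCS trim performed in
 * wm_rxintr() above, in isolation.  Drop the trailing 4-byte CRC from a
 * buffer chain, borrowing bytes from the previous buffer when the last
 * one is shorter than the CRC; prev_len and last_len are hypothetical
 * stand-ins for the mbuf lengths.
 */
#if 0	/* example only */
static void
example_trim_fcs(int *prev_len, int *last_len)
{

	if (*last_len < ETHER_CRC_LEN) {
		*prev_len -= ETHER_CRC_LEN - *last_len;
		*last_len = 0;
	} else
		*last_len -= ETHER_CRC_LEN;
}
#endif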
   5945 
   5946 /*
   5947  * wm_linkintr_gmii:
   5948  *
   5949  *	Helper; handle link interrupts for GMII.
   5950  */
   5951 static void
   5952 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   5953 {
   5954 
   5955 	KASSERT(WM_TX_LOCKED(sc));
   5956 
   5957 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   5958 		__func__));
   5959 
   5960 	if (icr & ICR_LSC) {
   5961 		DPRINTF(WM_DEBUG_LINK,
   5962 		    ("%s: LINK: LSC -> mii_pollstat\n",
   5963 			device_xname(sc->sc_dev)));
   5964 		mii_pollstat(&sc->sc_mii);
   5965 		if (sc->sc_type == WM_T_82543) {
   5966 			int miistatus, active;
   5967 
   5968 			/*
    5969 			 * With 82543, we need to force the MAC's
    5970 			 * speed and duplex to match the PHY's
    5971 			 * speed and duplex configuration.
   5972 			 */
   5973 			miistatus = sc->sc_mii.mii_media_status;
   5974 
   5975 			if (miistatus & IFM_ACTIVE) {
   5976 				active = sc->sc_mii.mii_media_active;
   5977 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   5978 				switch (IFM_SUBTYPE(active)) {
   5979 				case IFM_10_T:
   5980 					sc->sc_ctrl |= CTRL_SPEED_10;
   5981 					break;
   5982 				case IFM_100_TX:
   5983 					sc->sc_ctrl |= CTRL_SPEED_100;
   5984 					break;
   5985 				case IFM_1000_T:
   5986 					sc->sc_ctrl |= CTRL_SPEED_1000;
   5987 					break;
   5988 				default:
   5989 					/*
   5990 					 * fiber?
    5991 					 * Should not enter here.
   5992 					 */
   5993 					printf("unknown media (%x)\n",
   5994 					    active);
   5995 					break;
   5996 				}
   5997 				if (active & IFM_FDX)
   5998 					sc->sc_ctrl |= CTRL_FD;
   5999 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6000 			}
   6001 		} else if ((sc->sc_type == WM_T_ICH8)
   6002 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   6003 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   6004 		} else if (sc->sc_type == WM_T_PCH) {
   6005 			wm_k1_gig_workaround_hv(sc,
   6006 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   6007 		}
   6008 
   6009 		if ((sc->sc_phytype == WMPHY_82578)
   6010 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   6011 			== IFM_1000_T)) {
   6012 
   6013 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   6014 				delay(200*1000); /* XXX too big */
   6015 
   6016 				/* Link stall fix for link up */
   6017 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   6018 				    HV_MUX_DATA_CTRL,
   6019 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   6020 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   6021 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   6022 				    HV_MUX_DATA_CTRL,
   6023 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   6024 			}
   6025 		}
   6026 	} else if (icr & ICR_RXSEQ) {
   6027 		DPRINTF(WM_DEBUG_LINK,
    6028 		    ("%s: LINK: Receive sequence error\n",
   6029 			device_xname(sc->sc_dev)));
   6030 	}
   6031 }
   6032 
   6033 /*
   6034  * wm_linkintr_tbi:
   6035  *
   6036  *	Helper; handle link interrupts for TBI mode.
   6037  */
   6038 static void
   6039 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   6040 {
   6041 	uint32_t status;
   6042 
   6043 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   6044 		__func__));
   6045 
   6046 	status = CSR_READ(sc, WMREG_STATUS);
   6047 	if (icr & ICR_LSC) {
   6048 		if (status & STATUS_LU) {
   6049 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   6050 			    device_xname(sc->sc_dev),
   6051 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   6052 			/*
   6053 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   6054 			 * so we should update sc->sc_ctrl
   6055 			 */
   6056 
   6057 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   6058 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   6059 			sc->sc_fcrtl &= ~FCRTL_XONE;
   6060 			if (status & STATUS_FD)
   6061 				sc->sc_tctl |=
   6062 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6063 			else
   6064 				sc->sc_tctl |=
   6065 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   6066 			if (sc->sc_ctrl & CTRL_TFCE)
   6067 				sc->sc_fcrtl |= FCRTL_XONE;
   6068 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6069 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   6070 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   6071 				      sc->sc_fcrtl);
   6072 			sc->sc_tbi_linkup = 1;
   6073 		} else {
   6074 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   6075 			    device_xname(sc->sc_dev)));
   6076 			sc->sc_tbi_linkup = 0;
   6077 		}
   6078 		/* Update LED */
   6079 		wm_tbi_serdes_set_linkled(sc);
   6080 	} else if (icr & ICR_RXSEQ) {
   6081 		DPRINTF(WM_DEBUG_LINK,
   6082 		    ("%s: LINK: Receive sequence error\n",
   6083 		    device_xname(sc->sc_dev)));
   6084 	}
   6085 }
   6086 
   6087 /*
   6088  * wm_linkintr_serdes:
   6089  *
    6090  *	Helper; handle link interrupts for SERDES mode.
   6091  */
   6092 static void
   6093 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   6094 {
   6095 	struct mii_data *mii = &sc->sc_mii;
   6096 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6097 	uint32_t pcs_adv, pcs_lpab, reg;
   6098 
   6099 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   6100 		__func__));
   6101 
   6102 	if (icr & ICR_LSC) {
   6103 		/* Check PCS */
   6104 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   6105 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   6106 			mii->mii_media_status |= IFM_ACTIVE;
   6107 			sc->sc_tbi_linkup = 1;
   6108 		} else {
   6109 			mii->mii_media_status |= IFM_NONE;
   6110 			sc->sc_tbi_linkup = 0;
   6111 			wm_tbi_serdes_set_linkled(sc);
   6112 			return;
   6113 		}
   6114 		mii->mii_media_active |= IFM_1000_SX;
   6115 		if ((reg & PCS_LSTS_FDX) != 0)
   6116 			mii->mii_media_active |= IFM_FDX;
   6117 		else
   6118 			mii->mii_media_active |= IFM_HDX;
   6119 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   6120 			/* Check flow */
   6121 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   6122 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   6123 				DPRINTF(WM_DEBUG_LINK,
   6124 				    ("XXX LINKOK but not ACOMP\n"));
   6125 				return;
   6126 			}
   6127 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   6128 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   6129 			DPRINTF(WM_DEBUG_LINK,
   6130 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   6131 			if ((pcs_adv & TXCW_SYM_PAUSE)
   6132 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   6133 				mii->mii_media_active |= IFM_FLOW
   6134 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   6135 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   6136 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   6137 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   6138 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   6139 				mii->mii_media_active |= IFM_FLOW
   6140 				    | IFM_ETH_TXPAUSE;
   6141 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   6142 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   6143 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   6144 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   6145 				mii->mii_media_active |= IFM_FLOW
   6146 				    | IFM_ETH_RXPAUSE;
   6147 		}
   6148 		/* Update LED */
   6149 		wm_tbi_serdes_set_linkled(sc);
   6150 	} else {
   6151 		DPRINTF(WM_DEBUG_LINK,
   6152 		    ("%s: LINK: Receive sequence error\n",
   6153 		    device_xname(sc->sc_dev)));
   6154 	}
   6155 }
   6156 
   6157 /*
   6158  * wm_linkintr:
   6159  *
   6160  *	Helper; handle link interrupts.
   6161  */
   6162 static void
   6163 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   6164 {
   6165 
   6166 	if (sc->sc_flags & WM_F_HAS_MII)
   6167 		wm_linkintr_gmii(sc, icr);
   6168 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   6169 	    && (sc->sc_type >= WM_T_82575))
   6170 		wm_linkintr_serdes(sc, icr);
   6171 	else
   6172 		wm_linkintr_tbi(sc, icr);
   6173 }
   6174 
   6175 /*
   6176  * wm_intr:
   6177  *
   6178  *	Interrupt service routine.
   6179  */
   6180 static int
   6181 wm_intr(void *arg)
   6182 {
   6183 	struct wm_softc *sc = arg;
   6184 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6185 	uint32_t icr;
   6186 	int handled = 0;
   6187 
   6188 	while (1 /* CONSTCOND */) {
   6189 		icr = CSR_READ(sc, WMREG_ICR);
   6190 		if ((icr & sc->sc_icr) == 0)
   6191 			break;
   6192 		rnd_add_uint32(&sc->rnd_source, icr);
   6193 
   6194 		WM_RX_LOCK(sc);
   6195 
   6196 		if (sc->sc_stopping) {
   6197 			WM_RX_UNLOCK(sc);
   6198 			break;
   6199 		}
   6200 
   6201 		handled = 1;
   6202 
   6203 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   6204 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   6205 			DPRINTF(WM_DEBUG_RX,
   6206 			    ("%s: RX: got Rx intr 0x%08x\n",
   6207 			    device_xname(sc->sc_dev),
   6208 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   6209 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   6210 		}
   6211 #endif
   6212 		wm_rxintr(sc);
   6213 
   6214 		WM_RX_UNLOCK(sc);
   6215 		WM_TX_LOCK(sc);
   6216 
   6217 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   6218 		if (icr & ICR_TXDW) {
   6219 			DPRINTF(WM_DEBUG_TX,
   6220 			    ("%s: TX: got TXDW interrupt\n",
   6221 			    device_xname(sc->sc_dev)));
   6222 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   6223 		}
   6224 #endif
   6225 		wm_txintr(sc);
   6226 
   6227 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
   6228 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   6229 			wm_linkintr(sc, icr);
   6230 		}
   6231 
   6232 		WM_TX_UNLOCK(sc);
   6233 
   6234 		if (icr & ICR_RXO) {
   6235 #if defined(WM_DEBUG)
   6236 			log(LOG_WARNING, "%s: Receive overrun\n",
   6237 			    device_xname(sc->sc_dev));
   6238 #endif /* defined(WM_DEBUG) */
   6239 		}
   6240 	}
   6241 
   6242 	if (handled) {
   6243 		/* Try to get more packets going. */
   6244 		ifp->if_start(ifp);
   6245 	}
   6246 
   6247 	return handled;
   6248 }
   6249 
   6250 /*
   6251  * Media related.
   6252  * GMII, SGMII, TBI (and SERDES)
   6253  */
   6254 
   6255 /* Common */
   6256 
   6257 /*
   6258  * wm_tbi_serdes_set_linkled:
   6259  *
   6260  *	Update the link LED on TBI and SERDES devices.
   6261  */
   6262 static void
   6263 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   6264 {
   6265 
   6266 	if (sc->sc_tbi_linkup)
   6267 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   6268 	else
   6269 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   6270 
   6271 	/* 82540 or newer devices are active low */
   6272 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   6273 
   6274 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6275 }
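
/*
 * Illustrative aside, not part of the driver: the conditional-invert
 * idiom used in wm_tbi_serdes_set_linkled() above.  XORing with the pin
 * bit flips the driven level only on active-low parts, so a single code
 * path serves both polarities.
 */
#if 0	/* example only */
static uint32_t
example_drive_led(uint32_t ctrl, bool linkup, bool active_low)
{

	if (linkup)
		ctrl |= CTRL_SWDPIN(0);
	else
		ctrl &= ~CTRL_SWDPIN(0);
	if (active_low)
		ctrl ^= CTRL_SWDPIN(0);	/* invert the driven level */
	return ctrl;
}
#endif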
   6276 
   6277 /* GMII related */
   6278 
   6279 /*
   6280  * wm_gmii_reset:
   6281  *
   6282  *	Reset the PHY.
   6283  */
   6284 static void
   6285 wm_gmii_reset(struct wm_softc *sc)
   6286 {
   6287 	uint32_t reg;
   6288 	int rv;
   6289 
   6290 	/* get phy semaphore */
   6291 	switch (sc->sc_type) {
   6292 	case WM_T_82571:
   6293 	case WM_T_82572:
   6294 	case WM_T_82573:
   6295 	case WM_T_82574:
   6296 	case WM_T_82583:
    6297 		/* XXX should get sw semaphore, too */
   6298 		rv = wm_get_swsm_semaphore(sc);
   6299 		break;
   6300 	case WM_T_82575:
   6301 	case WM_T_82576:
   6302 	case WM_T_82580:
   6303 	case WM_T_I350:
   6304 	case WM_T_I354:
   6305 	case WM_T_I210:
   6306 	case WM_T_I211:
   6307 	case WM_T_80003:
   6308 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   6309 		break;
   6310 	case WM_T_ICH8:
   6311 	case WM_T_ICH9:
   6312 	case WM_T_ICH10:
   6313 	case WM_T_PCH:
   6314 	case WM_T_PCH2:
   6315 	case WM_T_PCH_LPT:
   6316 		rv = wm_get_swfwhw_semaphore(sc);
   6317 		break;
   6318 	default:
    6319 		/* nothing to do */
   6320 		rv = 0;
   6321 		break;
   6322 	}
   6323 	if (rv != 0) {
   6324 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6325 		    __func__);
   6326 		return;
   6327 	}
   6328 
   6329 	switch (sc->sc_type) {
   6330 	case WM_T_82542_2_0:
   6331 	case WM_T_82542_2_1:
   6332 		/* null */
   6333 		break;
   6334 	case WM_T_82543:
   6335 		/*
   6336 		 * With 82543, we need to force speed and duplex on the MAC
   6337 		 * equal to what the PHY speed and duplex configuration is.
   6338 		 * In addition, we need to perform a hardware reset on the PHY
   6339 		 * to take it out of reset.
   6340 		 */
   6341 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   6342 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6343 
   6344 		/* The PHY reset pin is active-low. */
   6345 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6346 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   6347 		    CTRL_EXT_SWDPIN(4));
   6348 		reg |= CTRL_EXT_SWDPIO(4);
   6349 
   6350 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6351 		CSR_WRITE_FLUSH(sc);
   6352 		delay(10*1000);
   6353 
   6354 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   6355 		CSR_WRITE_FLUSH(sc);
   6356 		delay(150);
   6357 #if 0
   6358 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   6359 #endif
   6360 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   6361 		break;
   6362 	case WM_T_82544:	/* reset 10000us */
   6363 	case WM_T_82540:
   6364 	case WM_T_82545:
   6365 	case WM_T_82545_3:
   6366 	case WM_T_82546:
   6367 	case WM_T_82546_3:
   6368 	case WM_T_82541:
   6369 	case WM_T_82541_2:
   6370 	case WM_T_82547:
   6371 	case WM_T_82547_2:
   6372 	case WM_T_82571:	/* reset 100us */
   6373 	case WM_T_82572:
   6374 	case WM_T_82573:
   6375 	case WM_T_82574:
   6376 	case WM_T_82575:
   6377 	case WM_T_82576:
   6378 	case WM_T_82580:
   6379 	case WM_T_I350:
   6380 	case WM_T_I354:
   6381 	case WM_T_I210:
   6382 	case WM_T_I211:
   6383 	case WM_T_82583:
   6384 	case WM_T_80003:
   6385 		/* generic reset */
   6386 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   6387 		CSR_WRITE_FLUSH(sc);
   6388 		delay(20000);
   6389 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6390 		CSR_WRITE_FLUSH(sc);
   6391 		delay(20000);
   6392 
   6393 		if ((sc->sc_type == WM_T_82541)
   6394 		    || (sc->sc_type == WM_T_82541_2)
   6395 		    || (sc->sc_type == WM_T_82547)
   6396 		    || (sc->sc_type == WM_T_82547_2)) {
    6397 			/* The workarounds for igp are done in igp_reset() */
   6398 			/* XXX add code to set LED after phy reset */
   6399 		}
   6400 		break;
   6401 	case WM_T_ICH8:
   6402 	case WM_T_ICH9:
   6403 	case WM_T_ICH10:
   6404 	case WM_T_PCH:
   6405 	case WM_T_PCH2:
   6406 	case WM_T_PCH_LPT:
   6407 		/* generic reset */
   6408 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   6409 		CSR_WRITE_FLUSH(sc);
   6410 		delay(100);
   6411 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6412 		CSR_WRITE_FLUSH(sc);
   6413 		delay(150);
   6414 		break;
   6415 	default:
   6416 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   6417 		    __func__);
   6418 		break;
   6419 	}
   6420 
   6421 	/* release PHY semaphore */
   6422 	switch (sc->sc_type) {
   6423 	case WM_T_82571:
   6424 	case WM_T_82572:
   6425 	case WM_T_82573:
   6426 	case WM_T_82574:
   6427 	case WM_T_82583:
   6428 		 /* XXX should put sw semaphore, too */
   6429 		wm_put_swsm_semaphore(sc);
   6430 		break;
   6431 	case WM_T_82575:
   6432 	case WM_T_82576:
   6433 	case WM_T_82580:
   6434 	case WM_T_I350:
   6435 	case WM_T_I354:
   6436 	case WM_T_I210:
   6437 	case WM_T_I211:
   6438 	case WM_T_80003:
   6439 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   6440 		break;
   6441 	case WM_T_ICH8:
   6442 	case WM_T_ICH9:
   6443 	case WM_T_ICH10:
   6444 	case WM_T_PCH:
   6445 	case WM_T_PCH2:
   6446 	case WM_T_PCH_LPT:
   6447 		wm_put_swfwhw_semaphore(sc);
   6448 		break;
   6449 	default:
    6450 		/* nothing to do */
   6451 		rv = 0;
   6452 		break;
   6453 	}
   6454 
   6455 	/* get_cfg_done */
   6456 	wm_get_cfg_done(sc);
   6457 
   6458 	/* extra setup */
   6459 	switch (sc->sc_type) {
   6460 	case WM_T_82542_2_0:
   6461 	case WM_T_82542_2_1:
   6462 	case WM_T_82543:
   6463 	case WM_T_82544:
   6464 	case WM_T_82540:
   6465 	case WM_T_82545:
   6466 	case WM_T_82545_3:
   6467 	case WM_T_82546:
   6468 	case WM_T_82546_3:
   6469 	case WM_T_82541_2:
   6470 	case WM_T_82547_2:
   6471 	case WM_T_82571:
   6472 	case WM_T_82572:
   6473 	case WM_T_82573:
   6474 	case WM_T_82574:
   6475 	case WM_T_82575:
   6476 	case WM_T_82576:
   6477 	case WM_T_82580:
   6478 	case WM_T_I350:
   6479 	case WM_T_I354:
   6480 	case WM_T_I210:
   6481 	case WM_T_I211:
   6482 	case WM_T_82583:
   6483 	case WM_T_80003:
   6484 		/* null */
   6485 		break;
   6486 	case WM_T_82541:
   6487 	case WM_T_82547:
    6488 		/* XXX Actively configure the LED after PHY reset */
   6489 		break;
   6490 	case WM_T_ICH8:
   6491 	case WM_T_ICH9:
   6492 	case WM_T_ICH10:
   6493 	case WM_T_PCH:
   6494 	case WM_T_PCH2:
   6495 	case WM_T_PCH_LPT:
    6496 		/* Allow time for h/w to get to a quiescent state after reset */
   6497 		delay(10*1000);
   6498 
   6499 		if (sc->sc_type == WM_T_PCH)
   6500 			wm_hv_phy_workaround_ich8lan(sc);
   6501 
   6502 		if (sc->sc_type == WM_T_PCH2)
   6503 			wm_lv_phy_workaround_ich8lan(sc);
   6504 
   6505 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   6506 			/*
   6507 			 * dummy read to clear the phy wakeup bit after lcd
   6508 			 * reset
   6509 			 */
   6510 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   6511 		}
   6512 
   6513 		/*
    6514 		 * XXX Configure the LCD with the extended configuration region
   6515 		 * in NVM
   6516 		 */
   6517 
   6518 		/* Configure the LCD with the OEM bits in NVM */
   6519 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   6520 		    || (sc->sc_type == WM_T_PCH_LPT)) {
   6521 			/*
   6522 			 * Disable LPLU.
   6523 			 * XXX It seems that 82567 has LPLU, too.
   6524 			 */
   6525 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   6526 			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
   6527 			reg |= HV_OEM_BITS_ANEGNOW;
   6528 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   6529 		}
   6530 		break;
   6531 	default:
   6532 		panic("%s: unknown type\n", __func__);
   6533 		break;
   6534 	}
   6535 }
   6536 
   6537 /*
   6538  * wm_get_phy_id_82575:
   6539  *
   6540  * Return PHY ID. Return -1 if it failed.
   6541  */
   6542 static int
   6543 wm_get_phy_id_82575(struct wm_softc *sc)
   6544 {
   6545 	uint32_t reg;
   6546 	int phyid = -1;
   6547 
   6548 	/* XXX */
   6549 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   6550 		return -1;
   6551 
   6552 	if (wm_sgmii_uses_mdio(sc)) {
   6553 		switch (sc->sc_type) {
   6554 		case WM_T_82575:
   6555 		case WM_T_82576:
   6556 			reg = CSR_READ(sc, WMREG_MDIC);
   6557 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   6558 			break;
   6559 		case WM_T_82580:
   6560 		case WM_T_I350:
   6561 		case WM_T_I354:
   6562 		case WM_T_I210:
   6563 		case WM_T_I211:
   6564 			reg = CSR_READ(sc, WMREG_MDICNFG);
   6565 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   6566 			break;
   6567 		default:
   6568 			return -1;
   6569 		}
   6570 	}
   6571 
   6572 	return phyid;
   6573 }
   6574 
   6575 
   6576 /*
   6577  * wm_gmii_mediainit:
   6578  *
   6579  *	Initialize media for use on 1000BASE-T devices.
   6580  */
   6581 static void
   6582 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   6583 {
   6584 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6585 	struct mii_data *mii = &sc->sc_mii;
   6586 	uint32_t reg;
   6587 
   6588 	/* We have GMII. */
   6589 	sc->sc_flags |= WM_F_HAS_MII;
   6590 
   6591 	if (sc->sc_type == WM_T_80003)
   6592 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   6593 	else
   6594 		sc->sc_tipg = TIPG_1000T_DFLT;
   6595 
   6596 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   6597 	if ((sc->sc_type == WM_T_82580)
   6598 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   6599 	    || (sc->sc_type == WM_T_I211)) {
   6600 		reg = CSR_READ(sc, WMREG_PHPM);
   6601 		reg &= ~PHPM_GO_LINK_D;
   6602 		CSR_WRITE(sc, WMREG_PHPM, reg);
   6603 	}
   6604 
   6605 	/*
   6606 	 * Let the chip set speed/duplex on its own based on
   6607 	 * signals from the PHY.
   6608 	 * XXXbouyer - I'm not sure this is right for the 80003,
   6609 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   6610 	 */
   6611 	sc->sc_ctrl |= CTRL_SLU;
   6612 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6613 
   6614 	/* Initialize our media structures and probe the GMII. */
   6615 	mii->mii_ifp = ifp;
   6616 
   6617 	/*
   6618 	 * Determine the PHY access method.
   6619 	 *
   6620 	 *  For SGMII, use SGMII specific method.
   6621 	 *
   6622 	 *  For some devices, we can determine the PHY access method
   6623 	 * from sc_type.
   6624 	 *
    6625 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    6626 	 * access method by sc_type, so use the PCI product ID for some
    6627 	 * devices.
    6628 	 * For other ICH8 variants, try igp's method first. If the PHY
    6629 	 * can't be detected that way, fall back to bm's method.
   6630 	 */
   6631 	switch (prodid) {
   6632 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   6633 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   6634 		/* 82577 */
   6635 		sc->sc_phytype = WMPHY_82577;
   6636 		break;
   6637 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   6638 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   6639 		/* 82578 */
   6640 		sc->sc_phytype = WMPHY_82578;
   6641 		break;
   6642 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   6643 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   6644 		/* 82579 */
   6645 		sc->sc_phytype = WMPHY_82579;
   6646 		break;
   6647 	case PCI_PRODUCT_INTEL_82801I_BM:
   6648 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   6649 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   6650 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   6651 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   6652 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   6653 		/* 82567 */
   6654 		sc->sc_phytype = WMPHY_BM;
   6655 		mii->mii_readreg = wm_gmii_bm_readreg;
   6656 		mii->mii_writereg = wm_gmii_bm_writereg;
   6657 		break;
   6658 	default:
   6659 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    6660 		    && !wm_sgmii_uses_mdio(sc)) {
   6661 			/* SGMII */
   6662 			mii->mii_readreg = wm_sgmii_readreg;
   6663 			mii->mii_writereg = wm_sgmii_writereg;
   6664 		} else if (sc->sc_type >= WM_T_80003) {
   6665 			/* 80003 */
   6666 			mii->mii_readreg = wm_gmii_i80003_readreg;
   6667 			mii->mii_writereg = wm_gmii_i80003_writereg;
   6668 		} else if (sc->sc_type >= WM_T_I210) {
   6669 			/* I210 and I211 */
   6670 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   6671 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   6672 		} else if (sc->sc_type >= WM_T_82580) {
   6673 			/* 82580, I350 and I354 */
   6674 			sc->sc_phytype = WMPHY_82580;
   6675 			mii->mii_readreg = wm_gmii_82580_readreg;
   6676 			mii->mii_writereg = wm_gmii_82580_writereg;
   6677 		} else if (sc->sc_type >= WM_T_82544) {
    6678 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   6679 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6680 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6681 		} else {
   6682 			mii->mii_readreg = wm_gmii_i82543_readreg;
   6683 			mii->mii_writereg = wm_gmii_i82543_writereg;
   6684 		}
   6685 		break;
   6686 	}
   6687 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
   6688 		/* All PCH* use _hv_ */
   6689 		mii->mii_readreg = wm_gmii_hv_readreg;
   6690 		mii->mii_writereg = wm_gmii_hv_writereg;
   6691 	}
   6692 	mii->mii_statchg = wm_gmii_statchg;
   6693 
   6694 	wm_gmii_reset(sc);
   6695 
   6696 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   6697 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   6698 	    wm_gmii_mediastatus);
   6699 
   6700 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   6701 	    || (sc->sc_type == WM_T_82580)
   6702 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6703 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   6704 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   6705 			/* Attach only one port */
   6706 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   6707 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6708 		} else {
   6709 			int i, id;
   6710 			uint32_t ctrl_ext;
   6711 
   6712 			id = wm_get_phy_id_82575(sc);
   6713 			if (id != -1) {
   6714 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   6715 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   6716 			}
   6717 			if ((id == -1)
   6718 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6719 				/* Power on sgmii phy if it is disabled */
   6720 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   6721 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   6722 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   6723 				CSR_WRITE_FLUSH(sc);
   6724 				delay(300*1000); /* XXX too long */
   6725 
    6726 				/* try PHY addresses 1 through 7 */
   6727 				for (i = 1; i < 8; i++)
   6728 					mii_attach(sc->sc_dev, &sc->sc_mii,
   6729 					    0xffffffff, i, MII_OFFSET_ANY,
   6730 					    MIIF_DOPAUSE);
   6731 
   6732 				/* restore previous sfp cage power state */
   6733 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   6734 			}
   6735 		}
   6736 	} else {
   6737 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6738 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6739 	}
   6740 
   6741 	/*
   6742 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   6743 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   6744 	 */
   6745 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   6746 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6747 		wm_set_mdio_slow_mode_hv(sc);
   6748 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6749 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6750 	}
   6751 
   6752 	/*
   6753 	 * (For ICH8 variants)
   6754 	 * If PHY detection failed, use BM's r/w function and retry.
   6755 	 */
   6756 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   6757 		/* if failed, retry with *_bm_* */
   6758 		mii->mii_readreg = wm_gmii_bm_readreg;
   6759 		mii->mii_writereg = wm_gmii_bm_writereg;
   6760 
   6761 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6762 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6763 	}
   6764 
   6765 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    6766 		/* No PHY was found */
   6767 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   6768 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   6769 		sc->sc_phytype = WMPHY_NONE;
   6770 	} else {
   6771 		/*
   6772 		 * PHY Found!
   6773 		 * Check PHY type.
   6774 		 */
   6775 		uint32_t model;
   6776 		struct mii_softc *child;
   6777 
   6778 		child = LIST_FIRST(&mii->mii_phys);
   6779 		if (device_is_a(child->mii_dev, "igphy")) {
   6780 			struct igphy_softc *isc = (struct igphy_softc *)child;
   6781 
   6782 			model = isc->sc_mii.mii_mpd_model;
   6783 			if (model == MII_MODEL_yyINTEL_I82566)
   6784 				sc->sc_phytype = WMPHY_IGP_3;
   6785 		}
   6786 
   6787 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   6788 	}
   6789 }
   6790 
   6791 /*
   6792  * wm_gmii_mediachange:	[ifmedia interface function]
   6793  *
   6794  *	Set hardware to newly-selected media on a 1000BASE-T device.
   6795  */
   6796 static int
   6797 wm_gmii_mediachange(struct ifnet *ifp)
   6798 {
   6799 	struct wm_softc *sc = ifp->if_softc;
   6800 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6801 	int rc;
   6802 
   6803 	if ((ifp->if_flags & IFF_UP) == 0)
   6804 		return 0;
   6805 
   6806 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   6807 	sc->sc_ctrl |= CTRL_SLU;
   6808 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   6809 	    || (sc->sc_type > WM_T_82543)) {
   6810 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   6811 	} else {
   6812 		sc->sc_ctrl &= ~CTRL_ASDE;
   6813 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   6814 		if (ife->ifm_media & IFM_FDX)
   6815 			sc->sc_ctrl |= CTRL_FD;
   6816 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   6817 		case IFM_10_T:
   6818 			sc->sc_ctrl |= CTRL_SPEED_10;
   6819 			break;
   6820 		case IFM_100_TX:
   6821 			sc->sc_ctrl |= CTRL_SPEED_100;
   6822 			break;
   6823 		case IFM_1000_T:
   6824 			sc->sc_ctrl |= CTRL_SPEED_1000;
   6825 			break;
   6826 		default:
   6827 			panic("wm_gmii_mediachange: bad media 0x%x",
   6828 			    ife->ifm_media);
   6829 		}
   6830 	}
   6831 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6832 	if (sc->sc_type <= WM_T_82543)
   6833 		wm_gmii_reset(sc);
   6834 
   6835 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   6836 		return 0;
   6837 	return rc;
   6838 }
   6839 
   6840 /*
   6841  * wm_gmii_mediastatus:	[ifmedia interface function]
   6842  *
   6843  *	Get the current interface media status on a 1000BASE-T device.
   6844  */
   6845 static void
   6846 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   6847 {
   6848 	struct wm_softc *sc = ifp->if_softc;
   6849 
   6850 	ether_mediastatus(ifp, ifmr);
   6851 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   6852 	    | sc->sc_flowflags;
   6853 }
   6854 
   6855 #define	MDI_IO		CTRL_SWDPIN(2)
   6856 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   6857 #define	MDI_CLK		CTRL_SWDPIN(3)
   6858 
   6859 static void
   6860 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   6861 {
   6862 	uint32_t i, v;
   6863 
   6864 	v = CSR_READ(sc, WMREG_CTRL);
   6865 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6866 	v |= MDI_DIR | CTRL_SWDPIO(3);
   6867 
   6868 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   6869 		if (data & i)
   6870 			v |= MDI_IO;
   6871 		else
   6872 			v &= ~MDI_IO;
   6873 		CSR_WRITE(sc, WMREG_CTRL, v);
   6874 		CSR_WRITE_FLUSH(sc);
   6875 		delay(10);
   6876 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6877 		CSR_WRITE_FLUSH(sc);
   6878 		delay(10);
   6879 		CSR_WRITE(sc, WMREG_CTRL, v);
   6880 		CSR_WRITE_FLUSH(sc);
   6881 		delay(10);
   6882 	}
   6883 }
   6884 
   6885 static uint32_t
   6886 wm_i82543_mii_recvbits(struct wm_softc *sc)
   6887 {
   6888 	uint32_t v, i, data = 0;
   6889 
   6890 	v = CSR_READ(sc, WMREG_CTRL);
   6891 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6892 	v |= CTRL_SWDPIO(3);
   6893 
   6894 	CSR_WRITE(sc, WMREG_CTRL, v);
   6895 	CSR_WRITE_FLUSH(sc);
   6896 	delay(10);
   6897 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6898 	CSR_WRITE_FLUSH(sc);
   6899 	delay(10);
   6900 	CSR_WRITE(sc, WMREG_CTRL, v);
   6901 	CSR_WRITE_FLUSH(sc);
   6902 	delay(10);
   6903 
   6904 	for (i = 0; i < 16; i++) {
   6905 		data <<= 1;
   6906 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6907 		CSR_WRITE_FLUSH(sc);
   6908 		delay(10);
   6909 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   6910 			data |= 1;
   6911 		CSR_WRITE(sc, WMREG_CTRL, v);
   6912 		CSR_WRITE_FLUSH(sc);
   6913 		delay(10);
   6914 	}
   6915 
   6916 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6917 	CSR_WRITE_FLUSH(sc);
   6918 	delay(10);
   6919 	CSR_WRITE(sc, WMREG_CTRL, v);
   6920 	CSR_WRITE_FLUSH(sc);
   6921 	delay(10);
   6922 
   6923 	return data;
   6924 }
   6925 
   6926 #undef MDI_IO
   6927 #undef MDI_DIR
   6928 #undef MDI_CLK
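
/*
 * The helpers above bit-bang IEEE 802.3 clause 22 MDIO frames on the
 * software-definable pins: a 32-bit preamble of all ones, the start
 * and opcode bits, the 5-bit PHY and register addresses and, for a
 * read, 16 bits of data clocked back in, as composed by the i82543
 * read/write functions below.
 */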
   6929 
   6930 /*
   6931  * wm_gmii_i82543_readreg:	[mii interface function]
   6932  *
   6933  *	Read a PHY register on the GMII (i82543 version).
   6934  */
   6935 static int
   6936 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   6937 {
   6938 	struct wm_softc *sc = device_private(self);
   6939 	int rv;
   6940 
   6941 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6942 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   6943 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   6944 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   6945 
   6946 	DPRINTF(WM_DEBUG_GMII,
   6947 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   6948 	    device_xname(sc->sc_dev), phy, reg, rv));
   6949 
   6950 	return rv;
   6951 }
   6952 
   6953 /*
   6954  * wm_gmii_i82543_writereg:	[mii interface function]
   6955  *
   6956  *	Write a PHY register on the GMII (i82543 version).
   6957  */
   6958 static void
   6959 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   6960 {
   6961 	struct wm_softc *sc = device_private(self);
   6962 
   6963 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6964 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   6965 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   6966 	    (MII_COMMAND_START << 30), 32);
   6967 }
   6968 
   6969 /*
   6970  * wm_gmii_i82544_readreg:	[mii interface function]
   6971  *
   6972  *	Read a PHY register on the GMII.
   6973  */
   6974 static int
   6975 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   6976 {
   6977 	struct wm_softc *sc = device_private(self);
   6978 	uint32_t mdic = 0;
   6979 	int i, rv;
   6980 
   6981 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   6982 	    MDIC_REGADD(reg));
   6983 
   6984 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6985 		mdic = CSR_READ(sc, WMREG_MDIC);
   6986 		if (mdic & MDIC_READY)
   6987 			break;
   6988 		delay(50);
   6989 	}
   6990 
   6991 	if ((mdic & MDIC_READY) == 0) {
   6992 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   6993 		    device_xname(sc->sc_dev), phy, reg);
   6994 		rv = 0;
   6995 	} else if (mdic & MDIC_E) {
   6996 #if 0 /* This is normal if no PHY is present. */
   6997 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   6998 		    device_xname(sc->sc_dev), phy, reg);
   6999 #endif
   7000 		rv = 0;
   7001 	} else {
   7002 		rv = MDIC_DATA(mdic);
   7003 		if (rv == 0xffff)
   7004 			rv = 0;
   7005 	}
   7006 
   7007 	return rv;
   7008 }
   7009 
   7010 /*
   7011  * wm_gmii_i82544_writereg:	[mii interface function]
   7012  *
   7013  *	Write a PHY register on the GMII.
   7014  */
   7015 static void
   7016 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   7017 {
   7018 	struct wm_softc *sc = device_private(self);
   7019 	uint32_t mdic = 0;
   7020 	int i;
   7021 
   7022 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   7023 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   7024 
   7025 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   7026 		mdic = CSR_READ(sc, WMREG_MDIC);
   7027 		if (mdic & MDIC_READY)
   7028 			break;
   7029 		delay(50);
   7030 	}
   7031 
   7032 	if ((mdic & MDIC_READY) == 0)
   7033 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   7034 		    device_xname(sc->sc_dev), phy, reg);
   7035 	else if (mdic & MDIC_E)
   7036 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   7037 		    device_xname(sc->sc_dev), phy, reg);
   7038 }
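
/*
 * Example (illustrative only, not referenced by the driver): a PHY
 * read-modify-write built from the two MDIC accessors above, using
 * the standard BMCR definitions from <dev/mii/miireg.h>.
 */
#if 0
static void
wm_example_phy_rmw(device_t self, int phy)
{
	int bmcr;

	/* Read the basic mode control register through MDIC */
	bmcr = wm_gmii_i82544_readreg(self, phy, MII_BMCR);
	/* Enable and restart autonegotiation */
	bmcr |= BMCR_AUTOEN | BMCR_STARTNEG;
	wm_gmii_i82544_writereg(self, phy, MII_BMCR, bmcr);
}
#endif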
   7039 
   7040 /*
   7041  * wm_gmii_i80003_readreg:	[mii interface function]
   7042  *
    7043  *	Read a PHY register on the kumeran bus (80003).
    7044  * This could be handled by the PHY layer if we didn't have to lock the
    7045  * resource ...
   7046  */
   7047 static int
   7048 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   7049 {
   7050 	struct wm_softc *sc = device_private(self);
   7051 	int sem;
   7052 	int rv;
   7053 
   7054 	if (phy != 1) /* only one PHY on kumeran bus */
   7055 		return 0;
   7056 
   7057 	sem = swfwphysem[sc->sc_funcid];
   7058 	if (wm_get_swfw_semaphore(sc, sem)) {
   7059 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7060 		    __func__);
   7061 		return 0;
   7062 	}
   7063 
   7064 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   7065 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   7066 		    reg >> GG82563_PAGE_SHIFT);
   7067 	} else {
   7068 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   7069 		    reg >> GG82563_PAGE_SHIFT);
   7070 	}
    7071 	/* Wait another 200us to work around a ready-bit bug in the MDIC register */
   7072 	delay(200);
   7073 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   7074 	delay(200);
   7075 
   7076 	wm_put_swfw_semaphore(sc, sem);
   7077 	return rv;
   7078 }
   7079 
   7080 /*
   7081  * wm_gmii_i80003_writereg:	[mii interface function]
   7082  *
    7083  *	Write a PHY register on the kumeran bus (80003).
    7084  * This could be handled by the PHY layer if we didn't have to lock the
    7085  * resource ...
   7086  */
   7087 static void
   7088 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   7089 {
   7090 	struct wm_softc *sc = device_private(self);
   7091 	int sem;
   7092 
   7093 	if (phy != 1) /* only one PHY on kumeran bus */
   7094 		return;
   7095 
   7096 	sem = swfwphysem[sc->sc_funcid];
   7097 	if (wm_get_swfw_semaphore(sc, sem)) {
   7098 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7099 		    __func__);
   7100 		return;
   7101 	}
   7102 
   7103 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   7104 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   7105 		    reg >> GG82563_PAGE_SHIFT);
   7106 	} else {
   7107 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   7108 		    reg >> GG82563_PAGE_SHIFT);
   7109 	}
    7110 	/* Wait another 200us to work around a ready-bit bug in the MDIC register */
   7111 	delay(200);
   7112 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   7113 	delay(200);
   7114 
   7115 	wm_put_swfw_semaphore(sc, sem);
   7116 }
   7117 
   7118 /*
   7119  * wm_gmii_bm_readreg:	[mii interface function]
   7120  *
    7121  *	Read a PHY register on the BM PHY (82567 variants).
    7122  * This could be handled by the PHY layer if we didn't have to lock the
    7123  * resource ...
   7124  */
   7125 static int
   7126 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   7127 {
   7128 	struct wm_softc *sc = device_private(self);
   7129 	int sem;
   7130 	int rv;
   7131 
   7132 	sem = swfwphysem[sc->sc_funcid];
   7133 	if (wm_get_swfw_semaphore(sc, sem)) {
   7134 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7135 		    __func__);
   7136 		return 0;
   7137 	}
   7138 
   7139 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   7140 		if (phy == 1)
   7141 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   7142 			    reg);
   7143 		else
   7144 			wm_gmii_i82544_writereg(self, phy,
   7145 			    GG82563_PHY_PAGE_SELECT,
   7146 			    reg >> GG82563_PAGE_SHIFT);
   7147 	}
   7148 
   7149 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   7150 	wm_put_swfw_semaphore(sc, sem);
   7151 	return rv;
   7152 }
   7153 
   7154 /*
   7155  * wm_gmii_bm_writereg:	[mii interface function]
   7156  *
    7157  *	Write a PHY register on the BM PHY (82567 variants).
    7158  * This could be handled by the PHY layer if we didn't have to lock the
    7159  * resource ...
   7160  */
   7161 static void
   7162 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   7163 {
   7164 	struct wm_softc *sc = device_private(self);
   7165 	int sem;
   7166 
   7167 	sem = swfwphysem[sc->sc_funcid];
   7168 	if (wm_get_swfw_semaphore(sc, sem)) {
   7169 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7170 		    __func__);
   7171 		return;
   7172 	}
   7173 
   7174 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   7175 		if (phy == 1)
   7176 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   7177 			    reg);
   7178 		else
   7179 			wm_gmii_i82544_writereg(self, phy,
   7180 			    GG82563_PHY_PAGE_SELECT,
   7181 			    reg >> GG82563_PAGE_SHIFT);
   7182 	}
   7183 
   7184 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   7185 	wm_put_swfw_semaphore(sc, sem);
   7186 }
   7187 
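/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register, which lives on page 800.
 * The sequence is: enable host wakeup access via BM_WUC_ENABLE_REG on
 * page 769, select page 800, write the register number to the address
 * opcode register, move the data through the data opcode register and
 * finally restore the original page 769 enable bits.
 */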
   7188 static void
   7189 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   7190 {
   7191 	struct wm_softc *sc = device_private(self);
   7192 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   7193 	uint16_t wuce;
   7194 
   7195 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   7196 	if (sc->sc_type == WM_T_PCH) {
    7197 		/* XXX the e1000 driver does nothing here... why? */
   7198 	}
   7199 
   7200 	/* Set page 769 */
   7201 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7202 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   7203 
   7204 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   7205 
   7206 	wuce &= ~BM_WUC_HOST_WU_BIT;
   7207 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   7208 	    wuce | BM_WUC_ENABLE_BIT);
   7209 
   7210 	/* Select page 800 */
   7211 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7212 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   7213 
   7214 	/* Write page 800 */
   7215 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   7216 
   7217 	if (rd)
   7218 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   7219 	else
   7220 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   7221 
   7222 	/* Set page 769 */
   7223 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7224 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   7225 
   7226 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   7227 }
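
/*
 * Usage sketch (illustrative only): reading the wakeup control register
 * through the helper above, as the PHY reset path does to clear the
 * wakeup bit.
 */
#if 0
	int16_t wuc;

	wm_access_phy_wakeup_reg_bm(sc->sc_dev, BM_WUC, &wuc, 1);
#endif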
   7228 
   7229 /*
   7230  * wm_gmii_hv_readreg:	[mii interface function]
   7231  *
    7232  *	Read a PHY register on the HV PHY (PCH and newer).
    7233  * This could be handled by the PHY layer if we didn't have to lock the
    7234  * resource ...
   7235  */
   7236 static int
   7237 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   7238 {
   7239 	struct wm_softc *sc = device_private(self);
   7240 	uint16_t page = BM_PHY_REG_PAGE(reg);
   7241 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   7242 	uint16_t val;
   7243 	int rv;
   7244 
   7245 	if (wm_get_swfwhw_semaphore(sc)) {
   7246 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7247 		    __func__);
   7248 		return 0;
   7249 	}
   7250 
   7251 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   7252 	if (sc->sc_phytype == WMPHY_82577) {
   7253 		/* XXX must write */
   7254 	}
   7255 
   7256 	/* Page 800 works differently than the rest so it has its own func */
   7257 	if (page == BM_WUC_PAGE) {
    7258 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
    7259 		return val;
   7260 	}
   7261 
   7262 	/*
    7263 	 * Pages lower than 768 work differently than the rest and are not
    7264 	 * handled here; just warn and bail out.
   7265 	 */
   7266 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    7267 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
    7268 		return 0;
   7269 	}
   7270 
   7271 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   7272 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7273 		    page << BME1000_PAGE_SHIFT);
   7274 	}
   7275 
   7276 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   7277 	wm_put_swfwhw_semaphore(sc);
   7278 	return rv;
   7279 }
   7280 
   7281 /*
   7282  * wm_gmii_hv_writereg:	[mii interface function]
   7283  *
    7284  *	Write a PHY register on the HV PHY (PCH and newer).
    7285  * This could be handled by the PHY layer if we didn't have to lock the
    7286  * resource ...
   7287  */
   7288 static void
   7289 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   7290 {
   7291 	struct wm_softc *sc = device_private(self);
   7292 	uint16_t page = BM_PHY_REG_PAGE(reg);
   7293 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   7294 
   7295 	if (wm_get_swfwhw_semaphore(sc)) {
   7296 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7297 		    __func__);
   7298 		return;
   7299 	}
   7300 
   7301 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   7302 
   7303 	/* Page 800 works differently than the rest so it has its own func */
   7304 	if (page == BM_WUC_PAGE) {
   7305 		uint16_t tmp;
   7306 
   7307 		tmp = val;
    7308 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
    7309 		return;
   7310 	}
   7311 
   7312 	/*
    7313 	 * Pages lower than 768 work differently than the rest and are not
    7314 	 * handled here; just warn and bail out.
   7315 	 */
   7316 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    7317 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
    7318 		return;
   7319 	}
   7320 
   7321 	/*
   7322 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   7323 	 * Power Down (whenever bit 11 of the PHY control register is set)
   7324 	 */
   7325 
   7326 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   7327 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7328 		    page << BME1000_PAGE_SHIFT);
   7329 	}
   7330 
   7331 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   7332 	wm_put_swfwhw_semaphore(sc);
   7333 }
   7334 
   7335 /*
   7336  * wm_gmii_82580_readreg:	[mii interface function]
   7337  *
   7338  *	Read a PHY register on the 82580 and I350.
   7339  * This could be handled by the PHY layer if we didn't have to lock the
    7340  * resource ...
   7341  */
   7342 static int
   7343 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   7344 {
   7345 	struct wm_softc *sc = device_private(self);
   7346 	int sem;
   7347 	int rv;
   7348 
   7349 	sem = swfwphysem[sc->sc_funcid];
   7350 	if (wm_get_swfw_semaphore(sc, sem)) {
   7351 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7352 		    __func__);
   7353 		return 0;
   7354 	}
   7355 
   7356 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   7357 
   7358 	wm_put_swfw_semaphore(sc, sem);
   7359 	return rv;
   7360 }
   7361 
   7362 /*
   7363  * wm_gmii_82580_writereg:	[mii interface function]
   7364  *
   7365  *	Write a PHY register on the 82580 and I350.
    7366  * resource ...
   7367  * ressource ...
   7368  */
   7369 static void
   7370 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   7371 {
   7372 	struct wm_softc *sc = device_private(self);
   7373 	int sem;
   7374 
   7375 	sem = swfwphysem[sc->sc_funcid];
   7376 	if (wm_get_swfw_semaphore(sc, sem)) {
   7377 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7378 		    __func__);
   7379 		return;
   7380 	}
   7381 
   7382 	wm_gmii_i82544_writereg(self, phy, reg, val);
   7383 
   7384 	wm_put_swfw_semaphore(sc, sem);
   7385 }
   7386 
   7387 /*
   7388  * wm_gmii_gs40g_readreg:	[mii interface function]
   7389  *
    7390  *	Read a PHY register on the I210 and I211.
    7391  * This could be handled by the PHY layer if we didn't have to lock the
    7392  * resource ...
   7393  */
   7394 static int
   7395 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   7396 {
   7397 	struct wm_softc *sc = device_private(self);
   7398 	int sem;
   7399 	int page, offset;
   7400 	int rv;
   7401 
   7402 	/* Acquire semaphore */
   7403 	sem = swfwphysem[sc->sc_funcid];
   7404 	if (wm_get_swfw_semaphore(sc, sem)) {
   7405 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7406 		    __func__);
   7407 		return 0;
   7408 	}
   7409 
   7410 	/* Page select */
   7411 	page = reg >> GS40G_PAGE_SHIFT;
   7412 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   7413 
   7414 	/* Read reg */
   7415 	offset = reg & GS40G_OFFSET_MASK;
   7416 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   7417 
   7418 	wm_put_swfw_semaphore(sc, sem);
   7419 	return rv;
   7420 }
   7421 
   7422 /*
   7423  * wm_gmii_gs40g_writereg:	[mii interface function]
   7424  *
   7425  *	Write a PHY register on the I210 and I211.
    7426  * resource ...
   7427  * ressource ...
   7428  */
   7429 static void
   7430 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   7431 {
   7432 	struct wm_softc *sc = device_private(self);
   7433 	int sem;
   7434 	int page, offset;
   7435 
   7436 	/* Acquire semaphore */
   7437 	sem = swfwphysem[sc->sc_funcid];
   7438 	if (wm_get_swfw_semaphore(sc, sem)) {
   7439 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7440 		    __func__);
   7441 		return;
   7442 	}
   7443 
   7444 	/* Page select */
   7445 	page = reg >> GS40G_PAGE_SHIFT;
   7446 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   7447 
   7448 	/* Write reg */
   7449 	offset = reg & GS40G_OFFSET_MASK;
   7450 	wm_gmii_i82544_writereg(self, phy, offset, val);
   7451 
   7452 	/* Release semaphore */
   7453 	wm_put_swfw_semaphore(sc, sem);
   7454 }
   7455 
   7456 /*
   7457  * wm_gmii_statchg:	[mii interface function]
   7458  *
   7459  *	Callback from MII layer when media changes.
   7460  */
   7461 static void
   7462 wm_gmii_statchg(struct ifnet *ifp)
   7463 {
   7464 	struct wm_softc *sc = ifp->if_softc;
   7465 	struct mii_data *mii = &sc->sc_mii;
   7466 
   7467 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   7468 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7469 	sc->sc_fcrtl &= ~FCRTL_XONE;
   7470 
   7471 	/*
   7472 	 * Get flow control negotiation result.
   7473 	 */
   7474 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   7475 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   7476 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   7477 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   7478 	}
   7479 
   7480 	if (sc->sc_flowflags & IFM_FLOW) {
   7481 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   7482 			sc->sc_ctrl |= CTRL_TFCE;
   7483 			sc->sc_fcrtl |= FCRTL_XONE;
   7484 		}
   7485 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   7486 			sc->sc_ctrl |= CTRL_RFCE;
   7487 	}
   7488 
   7489 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7490 		DPRINTF(WM_DEBUG_LINK,
   7491 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   7492 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7493 	} else {
   7494 		DPRINTF(WM_DEBUG_LINK,
   7495 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   7496 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7497 	}
   7498 
   7499 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7500 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7501 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   7502 						 : WMREG_FCRTL, sc->sc_fcrtl);
   7503 	if (sc->sc_type == WM_T_80003) {
   7504 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   7505 		case IFM_1000_T:
   7506 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   7507 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   7508 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   7509 			break;
   7510 		default:
   7511 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   7512 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   7513 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   7514 			break;
   7515 		}
   7516 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7517 	}
   7518 }
   7519 
   7520 /*
   7521  * wm_kmrn_readreg:
   7522  *
   7523  *	Read a kumeran register
   7524  */
   7525 static int
   7526 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   7527 {
   7528 	int rv;
   7529 
   7530 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   7531 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   7532 			aprint_error_dev(sc->sc_dev,
   7533 			    "%s: failed to get semaphore\n", __func__);
   7534 			return 0;
   7535 		}
   7536 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   7537 		if (wm_get_swfwhw_semaphore(sc)) {
   7538 			aprint_error_dev(sc->sc_dev,
   7539 			    "%s: failed to get semaphore\n", __func__);
   7540 			return 0;
   7541 		}
   7542 	}
   7543 
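	/*
	 * Kumeran registers are reached indirectly through KUMCTRLSTA:
	 * write the register offset with the read-enable (REN) bit set,
	 * wait briefly, then pick the data out of the low 16 bits.
	 */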
   7544 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7545 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7546 	    KUMCTRLSTA_REN);
   7547 	CSR_WRITE_FLUSH(sc);
   7548 	delay(2);
   7549 
   7550 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   7551 
   7552 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   7553 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   7554 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   7555 		wm_put_swfwhw_semaphore(sc);
   7556 
   7557 	return rv;
   7558 }
   7559 
   7560 /*
   7561  * wm_kmrn_writereg:
   7562  *
   7563  *	Write a kumeran register
   7564  */
   7565 static void
   7566 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   7567 {
   7568 
   7569 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   7570 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   7571 			aprint_error_dev(sc->sc_dev,
   7572 			    "%s: failed to get semaphore\n", __func__);
   7573 			return;
   7574 		}
   7575 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   7576 		if (wm_get_swfwhw_semaphore(sc)) {
   7577 			aprint_error_dev(sc->sc_dev,
   7578 			    "%s: failed to get semaphore\n", __func__);
   7579 			return;
   7580 		}
   7581 	}
   7582 
   7583 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7584 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7585 	    (val & KUMCTRLSTA_MASK));
   7586 
   7587 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   7588 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   7589 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   7590 		wm_put_swfwhw_semaphore(sc);
   7591 }
   7592 
   7593 /* SGMII related */
   7594 
   7595 /*
   7596  * wm_sgmii_uses_mdio
   7597  *
   7598  * Check whether the transaction is to the internal PHY or the external
   7599  * MDIO interface. Return true if it's MDIO.
   7600  */
   7601 static bool
   7602 wm_sgmii_uses_mdio(struct wm_softc *sc)
   7603 {
   7604 	uint32_t reg;
   7605 	bool ismdio = false;
   7606 
   7607 	switch (sc->sc_type) {
   7608 	case WM_T_82575:
   7609 	case WM_T_82576:
   7610 		reg = CSR_READ(sc, WMREG_MDIC);
   7611 		ismdio = ((reg & MDIC_DEST) != 0);
   7612 		break;
   7613 	case WM_T_82580:
   7614 	case WM_T_I350:
   7615 	case WM_T_I354:
   7616 	case WM_T_I210:
   7617 	case WM_T_I211:
   7618 		reg = CSR_READ(sc, WMREG_MDICNFG);
   7619 		ismdio = ((reg & MDICNFG_DEST) != 0);
   7620 		break;
   7621 	default:
   7622 		break;
   7623 	}
   7624 
   7625 	return ismdio;
   7626 }
   7627 
   7628 /*
   7629  * wm_sgmii_readreg:	[mii interface function]
   7630  *
    7631  *	Read a PHY register on the SGMII.
    7632  * This could be handled by the PHY layer if we didn't have to lock the
    7633  * resource ...
   7634  */
   7635 static int
   7636 wm_sgmii_readreg(device_t self, int phy, int reg)
   7637 {
   7638 	struct wm_softc *sc = device_private(self);
   7639 	uint32_t i2ccmd;
   7640 	int i, rv;
   7641 
   7642 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7643 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7644 		    __func__);
   7645 		return 0;
   7646 	}
   7647 
   7648 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7649 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7650 	    | I2CCMD_OPCODE_READ;
   7651 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7652 
   7653 	/* Poll the ready bit */
   7654 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7655 		delay(50);
   7656 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7657 		if (i2ccmd & I2CCMD_READY)
   7658 			break;
   7659 	}
   7660 	if ((i2ccmd & I2CCMD_READY) == 0)
   7661 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   7662 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7663 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7664 
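	/* The data comes back byte-swapped over I2C; swap it to host order. */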
   7665 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   7666 
   7667 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7668 	return rv;
   7669 }
   7670 
   7671 /*
   7672  * wm_sgmii_writereg:	[mii interface function]
   7673  *
   7674  *	Write a PHY register on the SGMII.
   7675  * This could be handled by the PHY layer if we didn't have to lock the
    7676  * resource ...
   7677  */
   7678 static void
   7679 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   7680 {
   7681 	struct wm_softc *sc = device_private(self);
   7682 	uint32_t i2ccmd;
   7683 	int i;
   7684 	int val_swapped;
   7685 
   7686 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7687 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7688 		    __func__);
   7689 		return;
   7690 	}
   7691 	/* Swap the data bytes for the I2C interface */
   7692 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   7693 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7694 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7695 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   7696 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7697 
   7698 	/* Poll the ready bit */
   7699 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7700 		delay(50);
   7701 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7702 		if (i2ccmd & I2CCMD_READY)
   7703 			break;
   7704 	}
   7705 	if ((i2ccmd & I2CCMD_READY) == 0)
   7706 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   7707 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7708 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7709 
    7710 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7711 }
   7712 
   7713 /* TBI related */
   7714 
   7715 /*
   7716  * wm_tbi_mediainit:
   7717  *
   7718  *	Initialize media for use on 1000BASE-X devices.
   7719  */
   7720 static void
   7721 wm_tbi_mediainit(struct wm_softc *sc)
   7722 {
   7723 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7724 	const char *sep = "";
   7725 
   7726 	if (sc->sc_type < WM_T_82543)
   7727 		sc->sc_tipg = TIPG_WM_DFLT;
   7728 	else
   7729 		sc->sc_tipg = TIPG_LG_DFLT;
   7730 
   7731 	sc->sc_tbi_serdes_anegticks = 5;
   7732 
   7733 	/* Initialize our media structures */
   7734 	sc->sc_mii.mii_ifp = ifp;
   7735 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   7736 
   7737 	if ((sc->sc_type >= WM_T_82575)
   7738 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   7739 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   7740 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   7741 	else
   7742 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   7743 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   7744 
   7745 	/*
   7746 	 * SWD Pins:
   7747 	 *
   7748 	 *	0 = Link LED (output)
   7749 	 *	1 = Loss Of Signal (input)
   7750 	 */
   7751 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   7752 
   7753 	/* XXX Perhaps this is only for TBI */
   7754 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   7755 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   7756 
   7757 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7758 		sc->sc_ctrl &= ~CTRL_LRST;
   7759 
   7760 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7761 
   7762 #define	ADD(ss, mm, dd)							\
   7763 do {									\
   7764 	aprint_normal("%s%s", sep, ss);					\
   7765 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   7766 	sep = ", ";							\
   7767 } while (/*CONSTCOND*/0)
   7768 
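	/*
	 * ADD() registers one media type with ifmedia and prints it as
	 * part of the comma-separated media list on the attach line.
	 */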
   7769 	aprint_normal_dev(sc->sc_dev, "");
   7770 
   7771 	/* Only 82545 is LX */
   7772 	if (sc->sc_type == WM_T_82545) {
   7773 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   7774 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
   7775 	} else {
   7776 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   7777 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   7778 	}
   7779 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   7780 	aprint_normal("\n");
   7781 
   7782 #undef ADD
   7783 
   7784 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   7785 }
   7786 
   7787 /*
   7788  * wm_tbi_mediachange:	[ifmedia interface function]
   7789  *
   7790  *	Set hardware to newly-selected media on a 1000BASE-X device.
   7791  */
   7792 static int
   7793 wm_tbi_mediachange(struct ifnet *ifp)
   7794 {
   7795 	struct wm_softc *sc = ifp->if_softc;
   7796 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7797 	uint32_t status;
   7798 	int i;
   7799 
   7800 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   7801 		/* XXX need some work for >= 82571 and < 82575 */
   7802 		if (sc->sc_type < WM_T_82575)
   7803 			return 0;
   7804 	}
   7805 
   7806 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   7807 	    || (sc->sc_type >= WM_T_82575))
   7808 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   7809 
   7810 	sc->sc_ctrl &= ~CTRL_LRST;
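	/* Advertise our ability bits in the TXCW configuration word */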
   7811 	sc->sc_txcw = TXCW_ANE;
   7812 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   7813 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   7814 	else if (ife->ifm_media & IFM_FDX)
   7815 		sc->sc_txcw |= TXCW_FD;
   7816 	else
   7817 		sc->sc_txcw |= TXCW_HD;
   7818 
   7819 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   7820 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   7821 
   7822 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   7823 		    device_xname(sc->sc_dev), sc->sc_txcw));
   7824 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7825 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7826 	CSR_WRITE_FLUSH(sc);
   7827 	delay(1000);
   7828 
   7829 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   7830 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   7831 
   7832 	/*
    7833 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
    7834 	 * optics detect a signal; on older chips a present signal reads as 0.
   7835 	 */
   7836 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   7837 		/* Have signal; wait for the link to come up. */
   7838 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   7839 			delay(10000);
   7840 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   7841 				break;
   7842 		}
   7843 
   7844 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   7845 			    device_xname(sc->sc_dev),i));
   7846 
   7847 		status = CSR_READ(sc, WMREG_STATUS);
   7848 		DPRINTF(WM_DEBUG_LINK,
   7849 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   7850 			device_xname(sc->sc_dev),status, STATUS_LU));
   7851 		if (status & STATUS_LU) {
   7852 			/* Link is up. */
   7853 			DPRINTF(WM_DEBUG_LINK,
   7854 			    ("%s: LINK: set media -> link up %s\n",
   7855 			    device_xname(sc->sc_dev),
   7856 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7857 
   7858 			/*
   7859 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7860 			 * so we should update sc->sc_ctrl
   7861 			 */
   7862 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7863 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7864 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7865 			if (status & STATUS_FD)
   7866 				sc->sc_tctl |=
   7867 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7868 			else
   7869 				sc->sc_tctl |=
   7870 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7871 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   7872 				sc->sc_fcrtl |= FCRTL_XONE;
   7873 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7874 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7875 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7876 				      sc->sc_fcrtl);
   7877 			sc->sc_tbi_linkup = 1;
   7878 		} else {
   7879 			if (i == WM_LINKUP_TIMEOUT)
   7880 				wm_check_for_link(sc);
   7881 			/* Link is down. */
   7882 			DPRINTF(WM_DEBUG_LINK,
   7883 			    ("%s: LINK: set media -> link down\n",
   7884 			    device_xname(sc->sc_dev)));
   7885 			sc->sc_tbi_linkup = 0;
   7886 		}
   7887 	} else {
   7888 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   7889 		    device_xname(sc->sc_dev)));
   7890 		sc->sc_tbi_linkup = 0;
   7891 	}
   7892 
   7893 	wm_tbi_serdes_set_linkled(sc);
   7894 
   7895 	return 0;
   7896 }
   7897 
   7898 /*
   7899  * wm_tbi_mediastatus:	[ifmedia interface function]
   7900  *
   7901  *	Get the current interface media status on a 1000BASE-X device.
   7902  */
   7903 static void
   7904 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   7905 {
   7906 	struct wm_softc *sc = ifp->if_softc;
   7907 	uint32_t ctrl, status;
   7908 
   7909 	ifmr->ifm_status = IFM_AVALID;
   7910 	ifmr->ifm_active = IFM_ETHER;
   7911 
   7912 	status = CSR_READ(sc, WMREG_STATUS);
   7913 	if ((status & STATUS_LU) == 0) {
   7914 		ifmr->ifm_active |= IFM_NONE;
   7915 		return;
   7916 	}
   7917 
   7918 	ifmr->ifm_status |= IFM_ACTIVE;
   7919 	/* Only 82545 is LX */
   7920 	if (sc->sc_type == WM_T_82545)
   7921 		ifmr->ifm_active |= IFM_1000_LX;
   7922 	else
   7923 		ifmr->ifm_active |= IFM_1000_SX;
    7924 	if (status & STATUS_FD)
   7925 		ifmr->ifm_active |= IFM_FDX;
   7926 	else
   7927 		ifmr->ifm_active |= IFM_HDX;
   7928 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7929 	if (ctrl & CTRL_RFCE)
   7930 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   7931 	if (ctrl & CTRL_TFCE)
   7932 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   7933 }
   7934 
   7935 /* XXX TBI only */
   7936 static int
   7937 wm_check_for_link(struct wm_softc *sc)
   7938 {
   7939 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7940 	uint32_t rxcw;
   7941 	uint32_t ctrl;
   7942 	uint32_t status;
   7943 	uint32_t sig;
   7944 
   7945 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   7946 		/* XXX need some work for >= 82571 */
   7947 		if (sc->sc_type >= WM_T_82571) {
   7948 			sc->sc_tbi_linkup = 1;
   7949 			return 0;
   7950 		}
   7951 	}
   7952 
   7953 	rxcw = CSR_READ(sc, WMREG_RXCW);
   7954 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7955 	status = CSR_READ(sc, WMREG_STATUS);
   7956 
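	/*
	 * Expected value of the signal-detect pin: it reads 1 on chips
	 * newer than the 82544 when the optics see a signal, 0 on older
	 * chips.
	 */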
   7957 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   7958 
   7959 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   7960 		device_xname(sc->sc_dev), __func__,
   7961 		((ctrl & CTRL_SWDPIN(1)) == sig),
   7962 		((status & STATUS_LU) != 0),
   7963 		((rxcw & RXCW_C) != 0)
   7964 		    ));
   7965 
   7966 	/*
   7967 	 * SWDPIN   LU RXCW
   7968 	 *      0    0    0
   7969 	 *      0    0    1	(should not happen)
   7970 	 *      0    1    0	(should not happen)
   7971 	 *      0    1    1	(should not happen)
   7972 	 *      1    0    0	Disable autonego and force linkup
   7973 	 *      1    0    1	got /C/ but not linkup yet
   7974 	 *      1    1    0	(linkup)
   7975 	 *      1    1    1	If IFM_AUTO, back to autonego
   7976 	 *
   7977 	 */
   7978 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   7979 	    && ((status & STATUS_LU) == 0)
   7980 	    && ((rxcw & RXCW_C) == 0)) {
   7981 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   7982 			__func__));
   7983 		sc->sc_tbi_linkup = 0;
   7984 		/* Disable auto-negotiation in the TXCW register */
   7985 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   7986 
   7987 		/*
   7988 		 * Force link-up and also force full-duplex.
   7989 		 *
   7990 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
   7991 		 * so we should update sc->sc_ctrl
   7992 		 */
   7993 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   7994 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7995 	} else if (((status & STATUS_LU) != 0)
   7996 	    && ((rxcw & RXCW_C) != 0)
   7997 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   7998 		sc->sc_tbi_linkup = 1;
   7999 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   8000 			__func__));
   8001 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   8002 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   8003 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   8004 	    && ((rxcw & RXCW_C) != 0)) {
   8005 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   8006 	} else {
   8007 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   8008 			status));
   8009 	}
   8010 
   8011 	return 0;
   8012 }
   8013 
   8014 /*
   8015  * wm_tbi_tick:
   8016  *
   8017  *	Check the link on TBI devices.
   8018  *	This function acts as mii_tick().
   8019  */
   8020 static void
   8021 wm_tbi_tick(struct wm_softc *sc)
   8022 {
   8023 	struct mii_data *mii = &sc->sc_mii;
   8024 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   8025 	uint32_t status;
   8026 
   8027 	KASSERT(WM_TX_LOCKED(sc));
   8028 
   8029 	status = CSR_READ(sc, WMREG_STATUS);
   8030 
   8031 	/* XXX is this needed? */
   8032 	(void)CSR_READ(sc, WMREG_RXCW);
   8033 	(void)CSR_READ(sc, WMREG_CTRL);
   8034 
   8035 	/* set link status */
   8036 	if ((status & STATUS_LU) == 0) {
   8037 		DPRINTF(WM_DEBUG_LINK,
   8038 		    ("%s: LINK: checklink -> down\n",
   8039 			device_xname(sc->sc_dev)));
   8040 		sc->sc_tbi_linkup = 0;
   8041 	} else if (sc->sc_tbi_linkup == 0) {
   8042 		DPRINTF(WM_DEBUG_LINK,
   8043 		    ("%s: LINK: checklink -> up %s\n",
   8044 			device_xname(sc->sc_dev),
   8045 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8046 		sc->sc_tbi_linkup = 1;
   8047 		sc->sc_tbi_serdes_ticks = 0;
   8048 	}
   8049 
   8050 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   8051 		goto setled;
   8052 
   8053 	if ((status & STATUS_LU) == 0) {
   8054 		sc->sc_tbi_linkup = 0;
   8055 		/* If the timer expired, retry autonegotiation */
   8056 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8057 		    && (++sc->sc_tbi_serdes_ticks
   8058 			>= sc->sc_tbi_serdes_anegticks)) {
   8059 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   8060 			sc->sc_tbi_serdes_ticks = 0;
   8061 			/*
   8062 			 * Reset the link, and let autonegotiation do
   8063 			 * its thing
   8064 			 */
   8065 			sc->sc_ctrl |= CTRL_LRST;
   8066 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8067 			CSR_WRITE_FLUSH(sc);
   8068 			delay(1000);
   8069 			sc->sc_ctrl &= ~CTRL_LRST;
   8070 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8071 			CSR_WRITE_FLUSH(sc);
   8072 			delay(1000);
   8073 			CSR_WRITE(sc, WMREG_TXCW,
   8074 			    sc->sc_txcw & ~TXCW_ANE);
   8075 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   8076 		}
   8077 	}
   8078 
   8079 setled:
   8080 	wm_tbi_serdes_set_linkled(sc);
   8081 }
   8082 
   8083 /* SERDES related */
   8084 static void
   8085 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   8086 {
   8087 	uint32_t reg;
   8088 
   8089 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   8090 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   8091 		return;
   8092 
   8093 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   8094 	reg |= PCS_CFG_PCS_EN;
   8095 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   8096 
   8097 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8098 	reg &= ~CTRL_EXT_SWDPIN(3);
   8099 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8100 	CSR_WRITE_FLUSH(sc);
   8101 }
   8102 
   8103 static int
   8104 wm_serdes_mediachange(struct ifnet *ifp)
   8105 {
   8106 	struct wm_softc *sc = ifp->if_softc;
   8107 	bool pcs_autoneg = true; /* XXX */
   8108 	uint32_t ctrl_ext, pcs_lctl, reg;
   8109 
   8110 	/* XXX Currently, this function is not called on 8257[12] */
   8111 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   8112 	    || (sc->sc_type >= WM_T_82575))
   8113 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   8114 
   8115 	wm_serdes_power_up_link_82575(sc);
   8116 
   8117 	sc->sc_ctrl |= CTRL_SLU;
   8118 
   8119 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   8120 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   8121 
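         	/*
         	 * Decide whether the PCS should autonegotiate: SGMII always
         	 * does, 1000BASE-KX never does, and otherwise the
         	 * WM_F_PCS_DIS_AUTONEGO flag may force it off on 82575/82576.
         	 */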
   8122 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8123 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   8124 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   8125 	case CTRL_EXT_LINK_MODE_SGMII:
   8126 		pcs_autoneg = true;
   8127 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   8128 		break;
   8129 	case CTRL_EXT_LINK_MODE_1000KX:
   8130 		pcs_autoneg = false;
   8131 		/* FALLTHROUGH */
   8132 	default:
   8133 		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
   8134 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   8135 				pcs_autoneg = false;
   8136 		}
   8137 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   8138 		    | CTRL_FRCFDX;
   8139 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   8140 	}
   8141 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8142 
   8143 	if (pcs_autoneg) {
   8144 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   8145 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   8146 
   8147 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
    8148 		/* Advertise both symmetric and asymmetric pause */
    8149 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   8150 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   8151 	} else
   8152 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   8153 
   8154 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    8155 
   8157 	return 0;
   8158 }
   8159 
   8160 static void
   8161 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8162 {
   8163 	struct wm_softc *sc = ifp->if_softc;
   8164 	struct mii_data *mii = &sc->sc_mii;
   8165 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8166 	uint32_t pcs_adv, pcs_lpab, reg;
   8167 
   8168 	ifmr->ifm_status = IFM_AVALID;
   8169 	ifmr->ifm_active = IFM_ETHER;
   8170 
   8171 	/* Check PCS */
   8172 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8173 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   8174 		ifmr->ifm_active |= IFM_NONE;
   8175 		sc->sc_tbi_linkup = 0;
   8176 		goto setled;
   8177 	}
   8178 
   8179 	sc->sc_tbi_linkup = 1;
   8180 	ifmr->ifm_status |= IFM_ACTIVE;
   8181 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   8182 	if ((reg & PCS_LSTS_FDX) != 0)
   8183 		ifmr->ifm_active |= IFM_FDX;
   8184 	else
   8185 		ifmr->ifm_active |= IFM_HDX;
   8186 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   8187 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8188 		/* Check flow */
   8189 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8190 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8191 			printf("XXX LINKOK but not ACOMP\n");
   8192 			goto setled;
   8193 		}
   8194 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8195 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
    8196 		printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
   8197 		if ((pcs_adv & TXCW_SYM_PAUSE)
   8198 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8199 			mii->mii_media_active |= IFM_FLOW
   8200 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8201 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8202 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   8203 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   8204 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   8205 			mii->mii_media_active |= IFM_FLOW
   8206 			    | IFM_ETH_TXPAUSE;
   8207 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   8208 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   8209 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8210 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   8211 			mii->mii_media_active |= IFM_FLOW
   8212 			    | IFM_ETH_RXPAUSE;
    8213 		}
   8215 	}
   8216 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8217 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   8218 setled:
   8219 	wm_tbi_serdes_set_linkled(sc);
   8220 }
   8221 
   8222 /*
   8223  * wm_serdes_tick:
   8224  *
   8225  *	Check the link on serdes devices.
   8226  */
   8227 static void
   8228 wm_serdes_tick(struct wm_softc *sc)
   8229 {
   8230 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8231 	struct mii_data *mii = &sc->sc_mii;
   8232 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   8233 	uint32_t reg;
   8234 
   8235 	KASSERT(WM_TX_LOCKED(sc));
   8236 
   8237 	mii->mii_media_status = IFM_AVALID;
   8238 	mii->mii_media_active = IFM_ETHER;
   8239 
   8240 	/* Check PCS */
   8241 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8242 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   8243 		mii->mii_media_status |= IFM_ACTIVE;
   8244 		sc->sc_tbi_linkup = 1;
   8245 		sc->sc_tbi_serdes_ticks = 0;
   8246 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   8247 		if ((reg & PCS_LSTS_FDX) != 0)
   8248 			mii->mii_media_active |= IFM_FDX;
   8249 		else
   8250 			mii->mii_media_active |= IFM_HDX;
   8251 	} else {
   8252 		mii->mii_media_status |= IFM_NONE;
   8253 		sc->sc_tbi_linkup = 0;
    8254 		/* If the timer expired, retry autonegotiation */
   8255 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8256 		    && (++sc->sc_tbi_serdes_ticks
   8257 			>= sc->sc_tbi_serdes_anegticks)) {
   8258 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   8259 			sc->sc_tbi_serdes_ticks = 0;
   8260 			/* XXX */
   8261 			wm_serdes_mediachange(ifp);
   8262 		}
   8263 	}
   8264 
   8265 	wm_tbi_serdes_set_linkled(sc);
   8266 }
   8267 
   8268 /* SFP related */
   8269 
   8270 static int
   8271 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   8272 {
   8273 	uint32_t i2ccmd;
   8274 	int i;
   8275 
   8276 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   8277 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8278 
   8279 	/* Poll the ready bit */
   8280 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8281 		delay(50);
   8282 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8283 		if (i2ccmd & I2CCMD_READY)
   8284 			break;
   8285 	}
   8286 	if ((i2ccmd & I2CCMD_READY) == 0)
   8287 		return -1;
   8288 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   8289 		return -1;
   8290 
   8291 	*data = i2ccmd & 0x00ff;
   8292 
   8293 	return 0;
   8294 }
   8295 
   8296 static uint32_t
   8297 wm_sfp_get_media_type(struct wm_softc *sc)
   8298 {
   8299 	uint32_t ctrl_ext;
   8300 	uint8_t val = 0;
   8301 	int timeout = 3;
   8302 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   8303 	int rv = -1;
   8304 
   8305 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8306 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   8307 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   8308 	CSR_WRITE_FLUSH(sc);
   8309 
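         	/*
         	 * The identifier byte (SFF_SFP_ID_OFF) and the Ethernet
         	 * compliance code byte (SFF_SFP_ETH_FLAGS_OFF) come from the
         	 * SFF module EEPROM layout, read over the I2C interface.
         	 */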
   8310 	/* Read SFP module data */
   8311 	while (timeout) {
   8312 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   8313 		if (rv == 0)
   8314 			break;
   8315 		delay(100*1000); /* XXX too big */
   8316 		timeout--;
   8317 	}
   8318 	if (rv != 0)
   8319 		goto out;
   8320 	switch (val) {
   8321 	case SFF_SFP_ID_SFF:
   8322 		aprint_normal_dev(sc->sc_dev,
   8323 		    "Module/Connector soldered to board\n");
   8324 		break;
   8325 	case SFF_SFP_ID_SFP:
   8326 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   8327 		break;
   8328 	case SFF_SFP_ID_UNKNOWN:
   8329 		goto out;
   8330 	default:
   8331 		break;
   8332 	}
   8333 
   8334 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
    8335 	if (rv != 0)
    8336 		goto out;
   8338 
   8339 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   8340 		mediatype = WM_MEDIATYPE_SERDES;
   8341 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   8342 		sc->sc_flags |= WM_F_SGMII;
   8343 		mediatype = WM_MEDIATYPE_COPPER;
   8344 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   8345 		sc->sc_flags |= WM_F_SGMII;
   8346 		mediatype = WM_MEDIATYPE_SERDES;
   8347 	}
   8348 
   8349 out:
   8350 	/* Restore I2C interface setting */
   8351 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8352 
   8353 	return mediatype;
   8354 }
   8355 /*
   8356  * NVM related.
   8357  * Microwire, SPI (w/wo EERD) and Flash.
   8358  */
   8359 
   8360 /* Both spi and uwire */
   8361 
   8362 /*
   8363  * wm_eeprom_sendbits:
   8364  *
   8365  *	Send a series of bits to the EEPROM.
   8366  */
   8367 static void
   8368 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   8369 {
   8370 	uint32_t reg;
   8371 	int x;
   8372 
   8373 	reg = CSR_READ(sc, WMREG_EECD);
   8374 
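         	/*
         	 * Clock the bits out MSB first: put each bit on DI, then
         	 * pulse SK high and low, holding each phase for ~2us.
         	 */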
   8375 	for (x = nbits; x > 0; x--) {
   8376 		if (bits & (1U << (x - 1)))
   8377 			reg |= EECD_DI;
   8378 		else
   8379 			reg &= ~EECD_DI;
   8380 		CSR_WRITE(sc, WMREG_EECD, reg);
   8381 		CSR_WRITE_FLUSH(sc);
   8382 		delay(2);
   8383 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   8384 		CSR_WRITE_FLUSH(sc);
   8385 		delay(2);
   8386 		CSR_WRITE(sc, WMREG_EECD, reg);
   8387 		CSR_WRITE_FLUSH(sc);
   8388 		delay(2);
   8389 	}
   8390 }
   8391 
   8392 /*
   8393  * wm_eeprom_recvbits:
   8394  *
   8395  *	Receive a series of bits from the EEPROM.
   8396  */
   8397 static void
   8398 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   8399 {
   8400 	uint32_t reg, val;
   8401 	int x;
   8402 
   8403 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   8404 
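         	/*
         	 * Clock the bits in MSB first: raise SK, sample DO while SK
         	 * is high, then lower SK, holding each phase for ~2us.
         	 */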
   8405 	val = 0;
   8406 	for (x = nbits; x > 0; x--) {
   8407 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   8408 		CSR_WRITE_FLUSH(sc);
   8409 		delay(2);
   8410 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   8411 			val |= (1U << (x - 1));
   8412 		CSR_WRITE(sc, WMREG_EECD, reg);
   8413 		CSR_WRITE_FLUSH(sc);
   8414 		delay(2);
   8415 	}
   8416 	*valp = val;
   8417 }
   8418 
   8419 /* Microwire */
   8420 
   8421 /*
   8422  * wm_nvm_read_uwire:
   8423  *
   8424  *	Read a word from the EEPROM using the MicroWire protocol.
   8425  */
   8426 static int
   8427 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   8428 {
   8429 	uint32_t reg, val;
   8430 	int i;
   8431 
   8432 	for (i = 0; i < wordcnt; i++) {
   8433 		/* Clear SK and DI. */
   8434 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   8435 		CSR_WRITE(sc, WMREG_EECD, reg);
   8436 
   8437 		/*
   8438 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   8439 		 * and Xen.
   8440 		 *
   8441 		 * We use this workaround only for 82540 because qemu's
   8442 		 * e1000 act as 82540.
   8443 		 */
   8444 		if (sc->sc_type == WM_T_82540) {
   8445 			reg |= EECD_SK;
   8446 			CSR_WRITE(sc, WMREG_EECD, reg);
   8447 			reg &= ~EECD_SK;
   8448 			CSR_WRITE(sc, WMREG_EECD, reg);
   8449 			CSR_WRITE_FLUSH(sc);
   8450 			delay(2);
   8451 		}
   8452 		/* XXX: end of workaround */
   8453 
   8454 		/* Set CHIP SELECT. */
   8455 		reg |= EECD_CS;
   8456 		CSR_WRITE(sc, WMREG_EECD, reg);
   8457 		CSR_WRITE_FLUSH(sc);
   8458 		delay(2);
   8459 
   8460 		/* Shift in the READ command. */
   8461 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   8462 
   8463 		/* Shift in address. */
   8464 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   8465 
   8466 		/* Shift out the data. */
   8467 		wm_eeprom_recvbits(sc, &val, 16);
   8468 		data[i] = val & 0xffff;
   8469 
   8470 		/* Clear CHIP SELECT. */
   8471 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   8472 		CSR_WRITE(sc, WMREG_EECD, reg);
   8473 		CSR_WRITE_FLUSH(sc);
   8474 		delay(2);
   8475 	}
   8476 
   8477 	return 0;
   8478 }
   8479 
   8480 /* SPI */
   8481 
   8482 /*
   8483  * Set SPI and FLASH related information from the EECD register.
   8484  * For 82541 and 82547, the word size is taken from EEPROM.
   8485  */
   8486 static int
   8487 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   8488 {
   8489 	int size;
   8490 	uint32_t reg;
   8491 	uint16_t data;
   8492 
   8493 	reg = CSR_READ(sc, WMREG_EECD);
   8494 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   8495 
   8496 	/* Read the size of NVM from EECD by default */
   8497 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   8498 	switch (sc->sc_type) {
   8499 	case WM_T_82541:
   8500 	case WM_T_82541_2:
   8501 	case WM_T_82547:
   8502 	case WM_T_82547_2:
   8503 		/* Set dummy value to access EEPROM */
   8504 		sc->sc_nvm_wordsize = 64;
   8505 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   8506 		reg = data;
   8507 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   8508 		if (size == 0)
   8509 			size = 6; /* 64 word size */
   8510 		else
   8511 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   8512 		break;
   8513 	case WM_T_80003:
   8514 	case WM_T_82571:
   8515 	case WM_T_82572:
   8516 	case WM_T_82573: /* SPI case */
   8517 	case WM_T_82574: /* SPI case */
   8518 	case WM_T_82583: /* SPI case */
   8519 		size += NVM_WORD_SIZE_BASE_SHIFT;
   8520 		if (size > 14)
   8521 			size = 14;
   8522 		break;
   8523 	case WM_T_82575:
   8524 	case WM_T_82576:
   8525 	case WM_T_82580:
   8526 	case WM_T_I350:
   8527 	case WM_T_I354:
   8528 	case WM_T_I210:
   8529 	case WM_T_I211:
   8530 		size += NVM_WORD_SIZE_BASE_SHIFT;
   8531 		if (size > 15)
   8532 			size = 15;
   8533 		break;
   8534 	default:
   8535 		aprint_error_dev(sc->sc_dev,
   8536 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   8537 		return -1;
   8538 		break;
   8539 	}
   8540 
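         	/* "size" now holds log2 of the NVM word count */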
   8541 	sc->sc_nvm_wordsize = 1 << size;
   8542 
   8543 	return 0;
   8544 }
   8545 
   8546 /*
   8547  * wm_nvm_ready_spi:
   8548  *
   8549  *	Wait for a SPI EEPROM to be ready for commands.
   8550  */
   8551 static int
   8552 wm_nvm_ready_spi(struct wm_softc *sc)
   8553 {
   8554 	uint32_t val;
   8555 	int usec;
   8556 
   8557 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   8558 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   8559 		wm_eeprom_recvbits(sc, &val, 8);
   8560 		if ((val & SPI_SR_RDY) == 0)
   8561 			break;
   8562 	}
   8563 	if (usec >= SPI_MAX_RETRIES) {
   8564 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   8565 		return 1;
   8566 	}
   8567 	return 0;
   8568 }
   8569 
   8570 /*
   8571  * wm_nvm_read_spi:
   8572  *
    8573  *	Read a word from the EEPROM using the SPI protocol.
   8574  */
   8575 static int
   8576 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   8577 {
   8578 	uint32_t reg, val;
   8579 	int i;
   8580 	uint8_t opc;
   8581 
   8582 	/* Clear SK and CS. */
   8583 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   8584 	CSR_WRITE(sc, WMREG_EECD, reg);
   8585 	CSR_WRITE_FLUSH(sc);
   8586 	delay(2);
   8587 
   8588 	if (wm_nvm_ready_spi(sc))
   8589 		return 1;
   8590 
   8591 	/* Toggle CS to flush commands. */
   8592 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   8593 	CSR_WRITE_FLUSH(sc);
   8594 	delay(2);
   8595 	CSR_WRITE(sc, WMREG_EECD, reg);
   8596 	CSR_WRITE_FLUSH(sc);
   8597 	delay(2);
   8598 
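         	/*
         	 * Small 8-bit-address SPI EEPROMs carry the ninth address bit
         	 * in the A8 bit of the READ opcode, so set it for words past
         	 * the first 128.  The part is byte addressed, hence the word
         	 * address is shifted left by one below.
         	 */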
   8599 	opc = SPI_OPC_READ;
   8600 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   8601 		opc |= SPI_OPC_A8;
   8602 
   8603 	wm_eeprom_sendbits(sc, opc, 8);
   8604 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   8605 
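         	/*
         	 * Each word is streamed out low byte first, but the bits are
         	 * shifted in MSB first, so swap the two bytes of every word.
         	 */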
   8606 	for (i = 0; i < wordcnt; i++) {
   8607 		wm_eeprom_recvbits(sc, &val, 16);
   8608 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   8609 	}
   8610 
   8611 	/* Raise CS and clear SK. */
   8612 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   8613 	CSR_WRITE(sc, WMREG_EECD, reg);
   8614 	CSR_WRITE_FLUSH(sc);
   8615 	delay(2);
   8616 
   8617 	return 0;
   8618 }
   8619 
   8620 /* Using with EERD */
   8621 
   8622 static int
   8623 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   8624 {
   8625 	uint32_t attempts = 100000;
   8626 	uint32_t i, reg = 0;
   8627 	int32_t done = -1;
   8628 
   8629 	for (i = 0; i < attempts; i++) {
   8630 		reg = CSR_READ(sc, rw);
   8631 
   8632 		if (reg & EERD_DONE) {
   8633 			done = 0;
   8634 			break;
   8635 		}
   8636 		delay(5);
   8637 	}
   8638 
   8639 	return done;
   8640 }
   8641 
   8642 static int
   8643 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   8644     uint16_t *data)
   8645 {
   8646 	int i, eerd = 0;
   8647 	int error = 0;
   8648 
   8649 	for (i = 0; i < wordcnt; i++) {
   8650 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   8651 
   8652 		CSR_WRITE(sc, WMREG_EERD, eerd);
   8653 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   8654 		if (error != 0)
   8655 			break;
   8656 
   8657 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   8658 	}
   8659 
   8660 	return error;
   8661 }
   8662 
   8663 /* Flash */
   8664 
   8665 static int
   8666 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   8667 {
   8668 	uint32_t eecd;
   8669 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   8670 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   8671 	uint8_t sig_byte = 0;
   8672 
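         	/*
         	 * Each flash bank carries a signature in the high byte of
         	 * word ICH_NVM_SIG_WORD; the bank whose signature matches
         	 * ICH_NVM_SIG_VALUE is the valid one.
         	 */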
   8673 	switch (sc->sc_type) {
   8674 	case WM_T_ICH8:
   8675 	case WM_T_ICH9:
   8676 		eecd = CSR_READ(sc, WMREG_EECD);
   8677 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   8678 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   8679 			return 0;
   8680 		}
   8681 		/* FALLTHROUGH */
   8682 	default:
   8683 		/* Default to 0 */
   8684 		*bank = 0;
   8685 
   8686 		/* Check bank 0 */
   8687 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   8688 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   8689 			*bank = 0;
   8690 			return 0;
   8691 		}
   8692 
   8693 		/* Check bank 1 */
   8694 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   8695 		    &sig_byte);
   8696 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   8697 			*bank = 1;
   8698 			return 0;
   8699 		}
   8700 	}
   8701 
   8702 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   8703 		device_xname(sc->sc_dev)));
   8704 	return -1;
   8705 }
   8706 
   8707 /******************************************************************************
   8708  * This function does initial flash setup so that a new read/write/erase cycle
   8709  * can be started.
   8710  *
   8711  * sc - The pointer to the hw structure
   8712  ****************************************************************************/
   8713 static int32_t
   8714 wm_ich8_cycle_init(struct wm_softc *sc)
   8715 {
   8716 	uint16_t hsfsts;
   8717 	int32_t error = 1;
   8718 	int32_t i     = 0;
   8719 
   8720 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8721 
    8722 	/* Check that the Flash Descriptor Valid bit is set in HW status */
    8723 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
    8724 		return error;
   8726 
    8727 	/* Clear FCERR and DAEL in HW status (both are write-1-to-clear) */
   8729 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   8730 
   8731 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8732 
    8733 	/*
    8734 	 * Either we should have a hardware SPI cycle-in-progress bit to
    8735 	 * check against before starting a new cycle, or the FDONE bit
    8736 	 * should be changed in the hardware so that it is 1 after a
    8737 	 * hardware reset, which could then be used to tell whether a
    8738 	 * cycle is in progress or has completed.  We should also have a
    8739 	 * software semaphore mechanism guarding FDONE or the
    8740 	 * cycle-in-progress bit so that accesses by two threads are
    8741 	 * serialized and two threads don't start a cycle at the same time.
    8742 	 */
   8743 
   8744 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   8745 		/*
   8746 		 * There is no cycle running at present, so we can start a
   8747 		 * cycle
   8748 		 */
   8749 
   8750 		/* Begin by setting Flash Cycle Done. */
   8751 		hsfsts |= HSFSTS_DONE;
   8752 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8753 		error = 0;
   8754 	} else {
   8755 		/*
   8756 		 * otherwise poll for sometime so the current cycle has a
   8757 		 * chance to end before giving up.
   8758 		 */
   8759 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   8760 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8761 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   8762 				error = 0;
   8763 				break;
   8764 			}
   8765 			delay(1);
   8766 		}
   8767 		if (error == 0) {
   8768 			/*
   8769 			 * Successful in waiting for previous cycle to timeout,
   8770 			 * now set the Flash Cycle Done.
   8771 			 */
   8772 			hsfsts |= HSFSTS_DONE;
   8773 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8774 		}
   8775 	}
   8776 	return error;
   8777 }
   8778 
   8779 /******************************************************************************
   8780  * This function starts a flash cycle and waits for its completion
   8781  *
   8782  * sc - The pointer to the hw structure
   8783  ****************************************************************************/
   8784 static int32_t
   8785 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   8786 {
   8787 	uint16_t hsflctl;
   8788 	uint16_t hsfsts;
   8789 	int32_t error = 1;
   8790 	uint32_t i = 0;
   8791 
   8792 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   8793 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   8794 	hsflctl |= HSFCTL_GO;
   8795 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   8796 
   8797 	/* Wait till FDONE bit is set to 1 */
   8798 	do {
   8799 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8800 		if (hsfsts & HSFSTS_DONE)
   8801 			break;
   8802 		delay(1);
   8803 		i++;
   8804 	} while (i < timeout);
    8805 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   8806 		error = 0;
   8807 
   8808 	return error;
   8809 }
   8810 
   8811 /******************************************************************************
   8812  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   8813  *
   8814  * sc - The pointer to the hw structure
   8815  * index - The index of the byte or word to read.
   8816  * size - Size of data to read, 1=byte 2=word
   8817  * data - Pointer to the word to store the value read.
   8818  *****************************************************************************/
   8819 static int32_t
   8820 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   8821     uint32_t size, uint16_t *data)
   8822 {
   8823 	uint16_t hsfsts;
   8824 	uint16_t hsflctl;
   8825 	uint32_t flash_linear_address;
   8826 	uint32_t flash_data = 0;
   8827 	int32_t error = 1;
   8828 	int32_t count = 0;
   8829 
    8830 	if (size < 1 || size > 2 || data == NULL ||
   8831 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   8832 		return error;
   8833 
   8834 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   8835 	    sc->sc_ich8_flash_base;
   8836 
   8837 	do {
   8838 		delay(1);
   8839 		/* Steps */
   8840 		error = wm_ich8_cycle_init(sc);
   8841 		if (error)
   8842 			break;
   8843 
   8844 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   8845 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
    8846 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   8847 		    & HSFCTL_BCOUNT_MASK;
   8848 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   8849 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   8850 
   8851 		/*
   8852 		 * Write the last 24 bits of index into Flash Linear address
   8853 		 * field in Flash Address
   8854 		 */
    8855 		/* TODO: maybe check the index against the size of the flash */
   8856 
   8857 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   8858 
   8859 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   8860 
    8861 		/*
    8862 		 * If FCERR is set, clear it and retry the whole sequence a
    8863 		 * few more times; otherwise read in (shift in) the Flash
    8864 		 * Data0 register.  The byte order is least significant byte
    8865 		 * first.
    8866 		 */
   8867 		if (error == 0) {
   8868 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   8869 			if (size == 1)
   8870 				*data = (uint8_t)(flash_data & 0x000000FF);
   8871 			else if (size == 2)
   8872 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   8873 			break;
   8874 		} else {
   8875 			/*
   8876 			 * If we've gotten here, then things are probably
   8877 			 * completely hosed, but if the error condition is
   8878 			 * detected, it won't hurt to give it another try...
   8879 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   8880 			 */
   8881 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8882 			if (hsfsts & HSFSTS_ERR) {
   8883 				/* Repeat for some time before giving up. */
   8884 				continue;
   8885 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   8886 				break;
   8887 		}
   8888 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   8889 
   8890 	return error;
   8891 }
   8892 
   8893 /******************************************************************************
   8894  * Reads a single byte from the NVM using the ICH8 flash access registers.
   8895  *
   8896  * sc - pointer to wm_hw structure
   8897  * index - The index of the byte to read.
   8898  * data - Pointer to a byte to store the value read.
   8899  *****************************************************************************/
   8900 static int32_t
   8901 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   8902 {
   8903 	int32_t status;
   8904 	uint16_t word = 0;
   8905 
   8906 	status = wm_read_ich8_data(sc, index, 1, &word);
   8907 	if (status == 0)
   8908 		*data = (uint8_t)word;
   8909 	else
   8910 		*data = 0;
   8911 
   8912 	return status;
   8913 }
   8914 
   8915 /******************************************************************************
   8916  * Reads a word from the NVM using the ICH8 flash access registers.
   8917  *
   8918  * sc - pointer to wm_hw structure
   8919  * index - The starting byte index of the word to read.
   8920  * data - Pointer to a word to store the value read.
   8921  *****************************************************************************/
   8922 static int32_t
   8923 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   8924 {
   8925 	int32_t status;
   8926 
   8927 	status = wm_read_ich8_data(sc, index, 2, data);
   8928 	return status;
   8929 }
   8930 
   8931 /******************************************************************************
   8932  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   8933  * register.
   8934  *
   8935  * sc - Struct containing variables accessed by shared code
   8936  * offset - offset of word in the EEPROM to read
   8937  * data - word read from the EEPROM
   8938  * words - number of words to read
   8939  *****************************************************************************/
   8940 static int
   8941 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   8942 {
   8943 	int32_t  error = 0;
   8944 	uint32_t flash_bank = 0;
   8945 	uint32_t act_offset = 0;
   8946 	uint32_t bank_offset = 0;
   8947 	uint16_t word = 0;
   8948 	uint16_t i = 0;
   8949 
   8950 	/*
   8951 	 * We need to know which is the valid flash bank.  In the event
   8952 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   8953 	 * managing flash_bank.  So it cannot be trusted and needs
   8954 	 * to be updated with each read.
   8955 	 */
   8956 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   8957 	if (error) {
   8958 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   8959 			device_xname(sc->sc_dev)));
   8960 		flash_bank = 0;
   8961 	}
   8962 
   8963 	/*
   8964 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   8965 	 * size
   8966 	 */
   8967 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   8968 
   8969 	error = wm_get_swfwhw_semaphore(sc);
   8970 	if (error) {
   8971 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8972 		    __func__);
   8973 		return error;
   8974 	}
   8975 
   8976 	for (i = 0; i < words; i++) {
   8977 		/* The NVM part needs a byte offset, hence * 2 */
   8978 		act_offset = bank_offset + ((offset + i) * 2);
   8979 		error = wm_read_ich8_word(sc, act_offset, &word);
   8980 		if (error) {
   8981 			aprint_error_dev(sc->sc_dev,
   8982 			    "%s: failed to read NVM\n", __func__);
   8983 			break;
   8984 		}
   8985 		data[i] = word;
   8986 	}
   8987 
   8988 	wm_put_swfwhw_semaphore(sc);
   8989 	return error;
   8990 }
   8991 
   8992 /* iNVM */
   8993 
   8994 static int
   8995 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
    8996 	int32_t  rv = -1;	/* Return "not found" unless a record matches */
   8997 	int32_t  rv = 0;
   8998 	uint32_t invm_dword;
   8999 	uint16_t i;
   9000 	uint8_t record_type, word_address;
   9001 
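         	/*
         	 * Walk the iNVM dword array.  CSR autoload and RSA key
         	 * records are skipped by their fixed sizes; a word autoload
         	 * record whose address matches yields the 16-bit data.
         	 */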
   9002 	for (i = 0; i < INVM_SIZE; i++) {
   9003 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   9004 		/* Get record type */
   9005 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   9006 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   9007 			break;
   9008 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   9009 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   9010 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   9011 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   9012 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   9013 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   9014 			if (word_address == address) {
   9015 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   9016 				rv = 0;
   9017 				break;
   9018 			}
   9019 		}
   9020 	}
   9021 
   9022 	return rv;
   9023 }
   9024 
   9025 static int
   9026 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   9027 {
   9028 	int rv = 0;
   9029 	int i;
   9030 
   9031 	for (i = 0; i < words; i++) {
   9032 		switch (offset + i) {
   9033 		case NVM_OFF_MACADDR:
   9034 		case NVM_OFF_MACADDR1:
   9035 		case NVM_OFF_MACADDR2:
   9036 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   9037 			if (rv != 0) {
   9038 				data[i] = 0xffff;
   9039 				rv = -1;
   9040 			}
   9041 			break;
   9042 		case NVM_OFF_CFG2:
   9043 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9044 			if (rv != 0) {
   9045 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   9046 				rv = 0;
   9047 			}
   9048 			break;
   9049 		case NVM_OFF_CFG4:
   9050 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9051 			if (rv != 0) {
   9052 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   9053 				rv = 0;
   9054 			}
   9055 			break;
   9056 		case NVM_OFF_LED_1_CFG:
   9057 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9058 			if (rv != 0) {
   9059 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   9060 				rv = 0;
   9061 			}
   9062 			break;
   9063 		case NVM_OFF_LED_0_2_CFG:
   9064 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9065 			if (rv != 0) {
   9066 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   9067 				rv = 0;
   9068 			}
   9069 			break;
   9070 		case NVM_OFF_ID_LED_SETTINGS:
   9071 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9072 			if (rv != 0) {
   9073 				*data = ID_LED_RESERVED_FFFF;
   9074 				rv = 0;
   9075 			}
   9076 			break;
   9077 		default:
   9078 			DPRINTF(WM_DEBUG_NVM,
   9079 			    ("NVM word 0x%02x is not mapped.\n", offset));
   9080 			*data = NVM_RESERVED_WORD;
   9081 			break;
   9082 		}
   9083 	}
   9084 
   9085 	return rv;
   9086 }
   9087 
   9088 /* Lock, detecting NVM type, validate checksum, version and read */
   9089 
   9090 /*
   9091  * wm_nvm_acquire:
   9092  *
   9093  *	Perform the EEPROM handshake required on some chips.
   9094  */
   9095 static int
   9096 wm_nvm_acquire(struct wm_softc *sc)
   9097 {
   9098 	uint32_t reg;
   9099 	int x;
   9100 	int ret = 0;
   9101 
    9102 	/* Flash-type NVM needs no handshake; always succeeds */
   9103 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   9104 		return 0;
   9105 
   9106 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9107 		ret = wm_get_swfwhw_semaphore(sc);
   9108 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9109 		/* This will also do wm_get_swsm_semaphore() if needed */
   9110 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   9111 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   9112 		ret = wm_get_swsm_semaphore(sc);
   9113 	}
   9114 
   9115 	if (ret) {
   9116 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9117 			__func__);
   9118 		return 1;
   9119 	}
   9120 
   9121 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   9122 		reg = CSR_READ(sc, WMREG_EECD);
   9123 
   9124 		/* Request EEPROM access. */
   9125 		reg |= EECD_EE_REQ;
   9126 		CSR_WRITE(sc, WMREG_EECD, reg);
   9127 
   9128 		/* ..and wait for it to be granted. */
   9129 		for (x = 0; x < 1000; x++) {
   9130 			reg = CSR_READ(sc, WMREG_EECD);
   9131 			if (reg & EECD_EE_GNT)
   9132 				break;
   9133 			delay(5);
   9134 		}
   9135 		if ((reg & EECD_EE_GNT) == 0) {
   9136 			aprint_error_dev(sc->sc_dev,
   9137 			    "could not acquire EEPROM GNT\n");
   9138 			reg &= ~EECD_EE_REQ;
   9139 			CSR_WRITE(sc, WMREG_EECD, reg);
   9140 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9141 				wm_put_swfwhw_semaphore(sc);
   9142 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   9143 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   9144 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   9145 				wm_put_swsm_semaphore(sc);
   9146 			return 1;
   9147 		}
   9148 	}
   9149 
   9150 	return 0;
   9151 }
   9152 
   9153 /*
   9154  * wm_nvm_release:
   9155  *
   9156  *	Release the EEPROM mutex.
   9157  */
   9158 static void
   9159 wm_nvm_release(struct wm_softc *sc)
   9160 {
   9161 	uint32_t reg;
   9162 
    9163 	/* Flash-type NVM needs no handshake; nothing to release */
   9164 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   9165 		return;
   9166 
   9167 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   9168 		reg = CSR_READ(sc, WMREG_EECD);
   9169 		reg &= ~EECD_EE_REQ;
   9170 		CSR_WRITE(sc, WMREG_EECD, reg);
   9171 	}
   9172 
   9173 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9174 		wm_put_swfwhw_semaphore(sc);
   9175 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9176 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   9177 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   9178 		wm_put_swsm_semaphore(sc);
   9179 }
   9180 
   9181 static int
   9182 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   9183 {
   9184 	uint32_t eecd = 0;
   9185 
   9186 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   9187 	    || sc->sc_type == WM_T_82583) {
   9188 		eecd = CSR_READ(sc, WMREG_EECD);
   9189 
   9190 		/* Isolate bits 15 & 16 */
   9191 		eecd = ((eecd >> 15) & 0x03);
   9192 
   9193 		/* If both bits are set, device is Flash type */
   9194 		if (eecd == 0x03)
   9195 			return 0;
   9196 	}
   9197 	return 1;
   9198 }
   9199 
   9200 static int
   9201 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   9202 {
   9203 	uint32_t eec;
   9204 
   9205 	eec = CSR_READ(sc, WMREG_EEC);
   9206 	if ((eec & EEC_FLASH_DETECTED) != 0)
   9207 		return 1;
   9208 
   9209 	return 0;
   9210 }
   9211 
   9212 /*
   9213  * wm_nvm_validate_checksum
   9214  *
    9215  * The sum of the first 64 (16 bit) words must equal NVM_CHECKSUM.
   9216  */
   9217 static int
   9218 wm_nvm_validate_checksum(struct wm_softc *sc)
   9219 {
   9220 	uint16_t checksum;
   9221 	uint16_t eeprom_data;
   9222 #ifdef WM_DEBUG
   9223 	uint16_t csum_wordaddr, valid_checksum;
   9224 #endif
   9225 	int i;
   9226 
   9227 	checksum = 0;
   9228 
   9229 	/* Don't check for I211 */
   9230 	if (sc->sc_type == WM_T_I211)
   9231 		return 0;
   9232 
   9233 #ifdef WM_DEBUG
   9234 	if (sc->sc_type == WM_T_PCH_LPT) {
   9235 		csum_wordaddr = NVM_OFF_COMPAT;
   9236 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   9237 	} else {
   9238 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   9239 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   9240 	}
   9241 
   9242 	/* Dump EEPROM image for debug */
   9243 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   9244 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   9245 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   9246 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   9247 		if ((eeprom_data & valid_checksum) == 0) {
   9248 			DPRINTF(WM_DEBUG_NVM,
   9249 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   9250 				device_xname(sc->sc_dev), eeprom_data,
   9251 				    valid_checksum));
   9252 		}
   9253 	}
   9254 
   9255 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   9256 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   9257 		for (i = 0; i < NVM_SIZE; i++) {
   9258 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   9259 				printf("XXXX ");
   9260 			else
   9261 				printf("%04hx ", eeprom_data);
   9262 			if (i % 8 == 7)
   9263 				printf("\n");
   9264 		}
   9265 	}
   9266 
   9267 #endif /* WM_DEBUG */
   9268 
   9269 	for (i = 0; i < NVM_SIZE; i++) {
   9270 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   9271 			return 1;
   9272 		checksum += eeprom_data;
   9273 	}
   9274 
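         	/* Note that a checksum mismatch is only reported, never fatal */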
   9275 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   9276 #ifdef WM_DEBUG
   9277 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   9278 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   9279 #endif
   9280 	}
   9281 
   9282 	return 0;
   9283 }
   9284 
   9285 static void
   9286 wm_nvm_version(struct wm_softc *sc)
   9287 {
   9288 	uint16_t major, minor, build, patch;
   9289 	uint16_t uid0, uid1;
   9290 	uint16_t nvm_data;
   9291 	uint16_t off;
   9292 	bool check_version = false;
   9293 	bool check_optionrom = false;
   9294 	bool have_build = false;
   9295 
   9296 	/*
   9297 	 * Version format:
   9298 	 *
   9299 	 * XYYZ
   9300 	 * X0YZ
   9301 	 * X0YY
   9302 	 *
   9303 	 * Example:
   9304 	 *
    9305 	 *	82571	0x50a2	5.10.2?	(the spec update mentions 5.6-5.10)
    9306 	 *	82571	0x50a6	5.10.6?
    9307 	 *	82572	0x506a	5.6.10?
    9308 	 *	82572EI	0x5069	5.6.9?
    9309 	 *	82574L	0x1080	1.8.0?	(the spec update mentions 2.1.4)
    9310 	 *		0x2013	2.1.3?
    9311 	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
   9312 	 */
   9313 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   9314 	switch (sc->sc_type) {
   9315 	case WM_T_82571:
   9316 	case WM_T_82572:
   9317 	case WM_T_82574:
   9318 		check_version = true;
   9319 		check_optionrom = true;
   9320 		have_build = true;
   9321 		break;
   9322 	case WM_T_82575:
   9323 	case WM_T_82576:
   9324 	case WM_T_82580:
   9325 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   9326 			check_version = true;
   9327 		break;
   9328 	case WM_T_I211:
   9329 		/* XXX wm_nvm_version_invm(sc); */
   9330 		return;
   9331 	case WM_T_I210:
   9332 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   9333 			/* XXX wm_nvm_version_invm(sc); */
   9334 			return;
   9335 		}
   9336 		/* FALLTHROUGH */
   9337 	case WM_T_I350:
   9338 	case WM_T_I354:
   9339 		check_version = true;
   9340 		check_optionrom = true;
   9341 		break;
   9342 	default:
   9343 		return;
   9344 	}
   9345 	if (check_version) {
   9346 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   9347 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   9348 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   9349 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   9350 			build = nvm_data & NVM_BUILD_MASK;
   9351 			have_build = true;
   9352 		} else
   9353 			minor = nvm_data & 0x00ff;
   9354 
    9355 		/* Convert the BCD-encoded minor to decimal */
   9356 		minor = (minor / 16) * 10 + (minor % 16);
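         		/* e.g. NVM word 0x50a2 yields major 5, minor 10, build 2 */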
   9357 
   9358 		aprint_verbose(", version %d.%d", major, minor);
   9359 		if (have_build)
   9360 			aprint_verbose(".%d", build);
   9361 		sc->sc_nvm_ver_major = major;
   9362 		sc->sc_nvm_ver_minor = minor;
   9363 	}
   9364 	if (check_optionrom) {
   9365 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   9366 		/* Option ROM Version */
   9367 		if ((off != 0x0000) && (off != 0xffff)) {
   9368 			off += NVM_COMBO_VER_OFF;
   9369 			wm_nvm_read(sc, off + 1, 1, &uid1);
   9370 			wm_nvm_read(sc, off, 1, &uid0);
   9371 			if ((uid0 != 0) && (uid0 != 0xffff)
   9372 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   9373 				/* 16bits */
   9374 				major = uid0 >> 8;
   9375 				build = (uid0 << 8) | (uid1 >> 8);
   9376 				patch = uid1 & 0x00ff;
   9377 				aprint_verbose(", option ROM Version %d.%d.%d",
   9378 				    major, build, patch);
   9379 			}
   9380 		}
   9381 	}
   9382 
   9383 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   9384 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   9385 }
   9386 
   9387 /*
   9388  * wm_nvm_read:
   9389  *
   9390  *	Read data from the serial EEPROM.
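          *
          *	The appropriate low-level access method (ICH8 flash, iNVM,
          *	EERD, SPI or Microwire) is selected from the chip flags.
          *	A hypothetical caller reading the three words of the primary
          *	MAC address might look like:
          *
          *		uint16_t myea[3];
          *
          *		if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) != 0)
          *			... handle the error ...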
   9391  */
   9392 static int
   9393 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9394 {
   9395 	int rv;
   9396 
   9397 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   9398 		return 1;
   9399 
   9400 	if (wm_nvm_acquire(sc))
   9401 		return 1;
   9402 
   9403 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   9404 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   9405 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   9406 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   9407 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   9408 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   9409 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   9410 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   9411 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   9412 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   9413 	else
   9414 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   9415 
   9416 	wm_nvm_release(sc);
   9417 	return rv;
   9418 }
   9419 
   9420 /*
   9421  * Hardware semaphores.
    9422  * Very complex...
   9423  */
   9424 
   9425 static int
   9426 wm_get_swsm_semaphore(struct wm_softc *sc)
   9427 {
   9428 	int32_t timeout;
   9429 	uint32_t swsm;
   9430 
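         	/*
         	 * Two-stage handshake: SMBI serializes software agents, then
         	 * SWESMBI arbitrates between software and firmware.  SWESMBI
         	 * is only ours if it reads back as set after the write.
         	 */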
   9431 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   9432 		/* Get the SW semaphore. */
   9433 		timeout = sc->sc_nvm_wordsize + 1;
   9434 		while (timeout) {
   9435 			swsm = CSR_READ(sc, WMREG_SWSM);
   9436 
   9437 			if ((swsm & SWSM_SMBI) == 0)
   9438 				break;
   9439 
   9440 			delay(50);
   9441 			timeout--;
   9442 		}
   9443 
   9444 		if (timeout == 0) {
   9445 			aprint_error_dev(sc->sc_dev,
   9446 			    "could not acquire SWSM SMBI\n");
   9447 			return 1;
   9448 		}
   9449 	}
   9450 
   9451 	/* Get the FW semaphore. */
   9452 	timeout = sc->sc_nvm_wordsize + 1;
   9453 	while (timeout) {
   9454 		swsm = CSR_READ(sc, WMREG_SWSM);
   9455 		swsm |= SWSM_SWESMBI;
   9456 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   9457 		/* If we managed to set the bit we got the semaphore. */
   9458 		swsm = CSR_READ(sc, WMREG_SWSM);
   9459 		if (swsm & SWSM_SWESMBI)
   9460 			break;
   9461 
   9462 		delay(50);
   9463 		timeout--;
   9464 	}
   9465 
   9466 	if (timeout == 0) {
   9467 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   9468 		/* Release semaphores */
   9469 		wm_put_swsm_semaphore(sc);
   9470 		return 1;
   9471 	}
   9472 	return 0;
   9473 }
   9474 
   9475 static void
   9476 wm_put_swsm_semaphore(struct wm_softc *sc)
   9477 {
   9478 	uint32_t swsm;
   9479 
   9480 	swsm = CSR_READ(sc, WMREG_SWSM);
   9481 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   9482 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   9483 }
   9484 
   9485 static int
   9486 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   9487 {
   9488 	uint32_t swfw_sync;
   9489 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   9490 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    9491 	int timeout;
   9492 
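         	/*
         	 * The resource is free only when neither the software nor
         	 * the firmware ownership bit for this mask is set in
         	 * SW_FW_SYNC; setting the software bit claims it.
         	 */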
   9493 	for (timeout = 0; timeout < 200; timeout++) {
   9494 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   9495 			if (wm_get_swsm_semaphore(sc)) {
   9496 				aprint_error_dev(sc->sc_dev,
   9497 				    "%s: failed to get semaphore\n",
   9498 				    __func__);
   9499 				return 1;
   9500 			}
   9501 		}
   9502 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   9503 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   9504 			swfw_sync |= swmask;
   9505 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   9506 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   9507 				wm_put_swsm_semaphore(sc);
   9508 			return 0;
   9509 		}
   9510 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   9511 			wm_put_swsm_semaphore(sc);
   9512 		delay(5000);
   9513 	}
   9514 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   9515 	    device_xname(sc->sc_dev), mask, swfw_sync);
   9516 	return 1;
   9517 }
   9518 
   9519 static void
   9520 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   9521 {
   9522 	uint32_t swfw_sync;
   9523 
   9524 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   9525 		while (wm_get_swsm_semaphore(sc) != 0)
   9526 			continue;
   9527 	}
   9528 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   9529 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   9530 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   9531 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   9532 		wm_put_swsm_semaphore(sc);
   9533 }
   9534 
   9535 static int
   9536 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   9537 {
   9538 	uint32_t ext_ctrl;
    9539 	int timeout;
   9540 
   9541 	for (timeout = 0; timeout < 200; timeout++) {
   9542 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   9543 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   9544 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   9545 
   9546 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   9547 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   9548 			return 0;
   9549 		delay(5000);
   9550 	}
   9551 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   9552 	    device_xname(sc->sc_dev), ext_ctrl);
   9553 	return 1;
   9554 }
   9555 
   9556 static void
   9557 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   9558 {
   9559 	uint32_t ext_ctrl;
   9560 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   9561 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   9562 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   9563 }
   9564 
   9565 static int
   9566 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   9567 {
   9568 	int i = 0;
   9569 	uint32_t reg;
   9570 
   9571 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   9572 	do {
   9573 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   9574 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   9575 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   9576 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   9577 			break;
   9578 		delay(2*1000);
   9579 		i++;
   9580 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   9581 
   9582 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   9583 		wm_put_hw_semaphore_82573(sc);
   9584 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   9585 		    device_xname(sc->sc_dev));
   9586 		return -1;
   9587 	}
   9588 
   9589 	return 0;
   9590 }
   9591 
   9592 static void
   9593 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   9594 {
   9595 	uint32_t reg;
   9596 
   9597 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   9598 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   9599 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   9600 }
   9601 
   9602 /*
   9603  * Management mode and power management related subroutines.
   9604  * BMC, AMT, suspend/resume and EEE.
   9605  */
   9606 
   9607 static int
   9608 wm_check_mng_mode(struct wm_softc *sc)
   9609 {
   9610 	int rv;
   9611 
   9612 	switch (sc->sc_type) {
   9613 	case WM_T_ICH8:
   9614 	case WM_T_ICH9:
   9615 	case WM_T_ICH10:
   9616 	case WM_T_PCH:
   9617 	case WM_T_PCH2:
   9618 	case WM_T_PCH_LPT:
   9619 		rv = wm_check_mng_mode_ich8lan(sc);
   9620 		break;
   9621 	case WM_T_82574:
   9622 	case WM_T_82583:
   9623 		rv = wm_check_mng_mode_82574(sc);
   9624 		break;
   9625 	case WM_T_82571:
   9626 	case WM_T_82572:
   9627 	case WM_T_82573:
   9628 	case WM_T_80003:
   9629 		rv = wm_check_mng_mode_generic(sc);
   9630 		break;
   9631 	default:
    9632 		/* nothing to do */
   9633 		rv = 0;
   9634 		break;
   9635 	}
   9636 
   9637 	return rv;
   9638 }
   9639 
   9640 static int
   9641 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   9642 {
   9643 	uint32_t fwsm;
   9644 
   9645 	fwsm = CSR_READ(sc, WMREG_FWSM);
   9646 
   9647 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   9648 		return 1;
   9649 
   9650 	return 0;
   9651 }
   9652 
   9653 static int
   9654 wm_check_mng_mode_82574(struct wm_softc *sc)
   9655 {
   9656 	uint16_t data;
   9657 
   9658 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   9659 
   9660 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   9661 		return 1;
   9662 
   9663 	return 0;
   9664 }
   9665 
   9666 static int
   9667 wm_check_mng_mode_generic(struct wm_softc *sc)
   9668 {
   9669 	uint32_t fwsm;
   9670 
   9671 	fwsm = CSR_READ(sc, WMREG_FWSM);
   9672 
   9673 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   9674 		return 1;
   9675 
   9676 	return 0;
   9677 }
   9678 
   9679 static int
   9680 wm_enable_mng_pass_thru(struct wm_softc *sc)
   9681 {
   9682 	uint32_t manc, fwsm, factps;
   9683 
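         	/*
         	 * Pass-through requires ASF firmware and TCO packet
         	 * interception (MANC_RECV_TCO_EN); the remaining checks
         	 * verify that the management engine is actually active.
         	 */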
   9684 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   9685 		return 0;
   9686 
   9687 	manc = CSR_READ(sc, WMREG_MANC);
   9688 
   9689 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   9690 		device_xname(sc->sc_dev), manc));
   9691 	if ((manc & MANC_RECV_TCO_EN) == 0)
   9692 		return 0;
   9693 
   9694 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   9695 		fwsm = CSR_READ(sc, WMREG_FWSM);
   9696 		factps = CSR_READ(sc, WMREG_FACTPS);
   9697 		if (((factps & FACTPS_MNGCG) == 0)
   9698 		    && ((fwsm & FWSM_MODE_MASK)
   9699 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   9700 			return 1;
   9701 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9702 		uint16_t data;
   9703 
   9704 		factps = CSR_READ(sc, WMREG_FACTPS);
   9705 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   9706 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   9707 			device_xname(sc->sc_dev), factps, data));
   9708 		if (((factps & FACTPS_MNGCG) == 0)
   9709 		    && ((data & NVM_CFG2_MNGM_MASK)
   9710 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   9711 			return 1;
   9712 	} else if (((manc & MANC_SMBUS_EN) != 0)
   9713 	    && ((manc & MANC_ASF_EN) == 0))
   9714 		return 1;
   9715 
   9716 	return 0;
   9717 }
   9718 
   9719 static int
   9720 wm_check_reset_block(struct wm_softc *sc)
   9721 {
   9722 	uint32_t reg;
   9723 
   9724 	switch (sc->sc_type) {
   9725 	case WM_T_ICH8:
   9726 	case WM_T_ICH9:
   9727 	case WM_T_ICH10:
   9728 	case WM_T_PCH:
   9729 	case WM_T_PCH2:
   9730 	case WM_T_PCH_LPT:
   9731 		reg = CSR_READ(sc, WMREG_FWSM);
   9732 		if ((reg & FWSM_RSPCIPHY) != 0)
   9733 			return 0;
   9734 		else
   9735 			return -1;
   9736 		break;
   9737 	case WM_T_82571:
   9738 	case WM_T_82572:
   9739 	case WM_T_82573:
   9740 	case WM_T_82574:
   9741 	case WM_T_82583:
   9742 	case WM_T_80003:
   9743 		reg = CSR_READ(sc, WMREG_MANC);
   9744 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   9745 			return -1;
   9746 		else
   9747 			return 0;
   9748 		break;
   9749 	default:
   9750 		/* no problem */
   9751 		break;
   9752 	}
   9753 
   9754 	return 0;
   9755 }
   9756 
   9757 static void
   9758 wm_get_hw_control(struct wm_softc *sc)
   9759 {
   9760 	uint32_t reg;
   9761 
   9762 	switch (sc->sc_type) {
   9763 	case WM_T_82573:
   9764 		reg = CSR_READ(sc, WMREG_SWSM);
   9765 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   9766 		break;
   9767 	case WM_T_82571:
   9768 	case WM_T_82572:
   9769 	case WM_T_82574:
   9770 	case WM_T_82583:
   9771 	case WM_T_80003:
   9772 	case WM_T_ICH8:
   9773 	case WM_T_ICH9:
   9774 	case WM_T_ICH10:
   9775 	case WM_T_PCH:
   9776 	case WM_T_PCH2:
   9777 	case WM_T_PCH_LPT:
   9778 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9779 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   9780 		break;
   9781 	default:
   9782 		break;
   9783 	}
   9784 }
   9785 
   9786 static void
   9787 wm_release_hw_control(struct wm_softc *sc)
   9788 {
   9789 	uint32_t reg;
   9790 
   9791 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   9792 		return;
   9793 
   9794 	if (sc->sc_type == WM_T_82573) {
   9795 		reg = CSR_READ(sc, WMREG_SWSM);
    9796 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   9798 	} else {
   9799 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9800 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   9801 	}
   9802 }
   9803 
   9804 static void
   9805 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   9806 {
   9807 	uint32_t reg;
   9808 
   9809 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   9810 
   9811 	if (on != 0)
   9812 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   9813 	else
   9814 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   9815 
   9816 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   9817 }
   9818 
   9819 static void
   9820 wm_smbustopci(struct wm_softc *sc)
   9821 {
   9822 	uint32_t fwsm;
   9823 
   9824 	fwsm = CSR_READ(sc, WMREG_FWSM);
   9825 	if (((fwsm & FWSM_FW_VALID) == 0)
   9826 	    && ((wm_check_reset_block(sc) == 0))) {
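         		/*
         		 * Toggle the LANPHYPC pin value to force the PHY
         		 * interconnect out of SMBus mode and into PCIe mode.
         		 */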
   9827 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   9828 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   9829 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9830 		CSR_WRITE_FLUSH(sc);
   9831 		delay(10);
   9832 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   9833 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9834 		CSR_WRITE_FLUSH(sc);
   9835 		delay(50*1000);
   9836 
   9837 		/*
   9838 		 * Gate automatic PHY configuration by hardware on non-managed
   9839 		 * 82579
   9840 		 */
   9841 		if (sc->sc_type == WM_T_PCH2)
   9842 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   9843 	}
   9844 }
   9845 
   9846 static void
   9847 wm_init_manageability(struct wm_softc *sc)
   9848 {
   9849 
   9850 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   9851 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   9852 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   9853 
   9854 		/* Disable hardware interception of ARP */
   9855 		manc &= ~MANC_ARP_EN;
   9856 
   9857 		/* Enable receiving management packets to the host */
   9858 		if (sc->sc_type >= WM_T_82571) {
   9859 			manc |= MANC_EN_MNG2HOST;
    9860 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   9861 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   9862 		}
   9863 
   9864 		CSR_WRITE(sc, WMREG_MANC, manc);
   9865 	}
   9866 }
   9867 
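/*
 * wm_release_manageability:
 *
 *	Undo wm_init_manageability(): give ARP handling back to the
 *	hardware.
 */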
   9868 static void
   9869 wm_release_manageability(struct wm_softc *sc)
   9870 {
   9871 
   9872 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   9873 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   9874 
   9875 		manc |= MANC_ARP_EN;
   9876 		if (sc->sc_type >= WM_T_82571)
   9877 			manc &= ~MANC_EN_MNG2HOST;
   9878 
   9879 		CSR_WRITE(sc, WMREG_MANC, manc);
   9880 	}
   9881 }
   9882 
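/*
 * wm_get_wakeup:
 *
 *	Probe the chip's manageability and wakeup capabilities and
 *	record them in sc_flags.
 */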
   9883 static void
   9884 wm_get_wakeup(struct wm_softc *sc)
   9885 {
   9886 
   9887 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   9888 	switch (sc->sc_type) {
   9889 	case WM_T_82573:
   9890 	case WM_T_82583:
   9891 		sc->sc_flags |= WM_F_HAS_AMT;
   9892 		/* FALLTHROUGH */
   9893 	case WM_T_80003:
   9894 	case WM_T_82541:
   9895 	case WM_T_82547:
   9896 	case WM_T_82571:
   9897 	case WM_T_82572:
   9898 	case WM_T_82574:
   9899 	case WM_T_82575:
   9900 	case WM_T_82576:
   9901 	case WM_T_82580:
   9902 	case WM_T_I350:
   9903 	case WM_T_I354:
   9904 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   9905 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   9906 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   9907 		break;
   9908 	case WM_T_ICH8:
   9909 	case WM_T_ICH9:
   9910 	case WM_T_ICH10:
   9911 	case WM_T_PCH:
   9912 	case WM_T_PCH2:
   9913 	case WM_T_PCH_LPT:
   9914 		sc->sc_flags |= WM_F_HAS_AMT;
   9915 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   9916 		break;
   9917 	default:
   9918 		break;
   9919 	}
   9920 
   9921 	/* 1: HAS_MANAGE */
   9922 	if (wm_enable_mng_pass_thru(sc) != 0)
   9923 		sc->sc_flags |= WM_F_HAS_MANAGE;
   9924 
   9925 #ifdef WM_DEBUG
   9926 	printf("\n");
   9927 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   9928 		printf("HAS_AMT,");
   9929 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   9930 		printf("ARC_SUBSYS_VALID,");
   9931 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   9932 		printf("ASF_FIRMWARE_PRES,");
   9933 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   9934 		printf("HAS_MANAGE,");
   9935 	printf("\n");
   9936 #endif
   9937 	/*
    9938 	 * Note that the WOL flags are set after the EEPROM settings have
    9939 	 * been reset.
   9940 	 */
   9941 }
   9942 
   9943 #ifdef WM_WOL
   9944 /* WOL in the newer chipset interfaces (pchlan) */
   9945 static void
   9946 wm_enable_phy_wakeup(struct wm_softc *sc)
   9947 {
   9948 #if 0
   9949 	uint16_t preg;
   9950 
   9951 	/* Copy MAC RARs to PHY RARs */
   9952 
   9953 	/* Copy MAC MTA to PHY MTA */
   9954 
   9955 	/* Configure PHY Rx Control register */
   9956 
   9957 	/* Enable PHY wakeup in MAC register */
   9958 
   9959 	/* Configure and enable PHY wakeup in PHY registers */
   9960 
   9961 	/* Activate PHY wakeup */
   9962 
   9963 	/* XXX */
   9964 #endif
   9965 }
   9966 
   9967 /* Power down workaround on D3 */
   9968 static void
   9969 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   9970 {
   9971 	uint32_t reg;
   9972 	int i;
   9973 
   9974 	for (i = 0; i < 2; i++) {
   9975 		/* Disable link */
   9976 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   9977 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   9978 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   9979 
   9980 		/*
   9981 		 * Call gig speed drop workaround on Gig disable before
   9982 		 * accessing any PHY registers
   9983 		 */
   9984 		if (sc->sc_type == WM_T_ICH8)
   9985 			wm_gig_downshift_workaround_ich8lan(sc);
   9986 
   9987 		/* Write VR power-down enable */
   9988 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   9989 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   9990 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   9991 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   9992 
   9993 		/* Read it back and test */
   9994 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   9995 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   9996 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   9997 			break;
   9998 
   9999 		/* Issue PHY reset and repeat at most one more time */
   10000 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10001 	}
   10002 }
   10003 
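/*
 * wm_enable_wakeup:
 *
 *	Prepare the hardware for wake-on-LAN: program the wakeup
 *	filters, apply chip-specific workarounds and enable PME in the
 *	PCI power management registers.
 */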
   10004 static void
   10005 wm_enable_wakeup(struct wm_softc *sc)
   10006 {
    10007 	uint32_t reg;
	int pmreg;
   10008 	pcireg_t pmode;
   10009 
   10010 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   10011 		&pmreg, NULL) == 0)
   10012 		return;
   10013 
   10014 	/* Advertise the wakeup capability */
   10015 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   10016 	    | CTRL_SWDPIN(3));
   10017 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   10018 
   10019 	/* ICH workaround */
   10020 	switch (sc->sc_type) {
   10021 	case WM_T_ICH8:
   10022 	case WM_T_ICH9:
   10023 	case WM_T_ICH10:
   10024 	case WM_T_PCH:
   10025 	case WM_T_PCH2:
   10026 	case WM_T_PCH_LPT:
   10027 		/* Disable gig during WOL */
   10028 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   10029 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   10030 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   10031 		if (sc->sc_type == WM_T_PCH)
   10032 			wm_gmii_reset(sc);
   10033 
   10034 		/* Power down workaround */
   10035 		if (sc->sc_phytype == WMPHY_82577) {
   10036 			struct mii_softc *child;
   10037 
   10038 			/* Assume that the PHY is copper */
   10039 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
    10040 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   10041 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   10042 				    (768 << 5) | 25, 0x0444); /* magic num */
   10043 		}
   10044 		break;
   10045 	default:
   10046 		break;
   10047 	}
   10048 
   10049 	/* Keep the laser running on fiber adapters */
   10050 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   10051 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   10052 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10053 		reg |= CTRL_EXT_SWDPIN(3);
   10054 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10055 	}
   10056 
   10057 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   10058 #if 0	/* for the multicast packet */
   10059 	reg |= WUFC_MC;
   10060 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   10061 #endif
   10062 
   10063 	if (sc->sc_type == WM_T_PCH) {
   10064 		wm_enable_phy_wakeup(sc);
   10065 	} else {
   10066 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   10067 		CSR_WRITE(sc, WMREG_WUFC, reg);
   10068 	}
   10069 
   10070 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10071 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10072 		|| (sc->sc_type == WM_T_PCH2))
   10073 		    && (sc->sc_phytype == WMPHY_IGP_3))
   10074 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   10075 
   10076 	/* Request PME */
   10077 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   10078 #if 0
   10079 	/* Disable WOL */
   10080 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   10081 #else
   10082 	/* For WOL */
   10083 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   10084 #endif
   10085 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   10086 }
   10087 #endif /* WM_WOL */
   10088 
   10089 /* EEE */
   10090 
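/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet (EEE) according to
 *	the WM_F_EEE flag.
 */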
   10091 static void
   10092 wm_set_eee_i350(struct wm_softc *sc)
   10093 {
   10094 	uint32_t ipcnfg, eeer;
   10095 
   10096 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   10097 	eeer = CSR_READ(sc, WMREG_EEER);
   10098 
   10099 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   10100 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   10101 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   10102 		    | EEER_LPI_FC);
   10103 	} else {
   10104 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   10105 		ipcnfg &= ~IPCNFG_10BASE_TE;
   10106 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   10107 		    | EEER_LPI_FC);
   10108 	}
   10109 
   10110 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   10111 	CSR_WRITE(sc, WMREG_EEER, eeer);
   10112 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   10113 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   10114 }
   10115 
   10116 /*
   10117  * Workarounds (mainly PHY related).
    10118  * Basically, PHY workarounds are in the PHY drivers.
   10119  */
   10120 
   10121 /* Work-around for 82566 Kumeran PCS lock loss */
   10122 static void
   10123 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   10124 {
   10125 	int miistatus, active, i;
   10126 	int reg;
   10127 
   10128 	miistatus = sc->sc_mii.mii_media_status;
   10129 
   10130 	/* If the link is not up, do nothing */
    10131 	if ((miistatus & IFM_ACTIVE) == 0)
   10132 		return;
   10133 
   10134 	active = sc->sc_mii.mii_media_active;
   10135 
    10136 	/* Nothing to do if the link speed is not 1Gbps */
   10137 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   10138 		return;
   10139 
   10140 	for (i = 0; i < 10; i++) {
   10141 		/* read twice */
   10142 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   10143 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
    10144 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   10145 			goto out;	/* GOOD! */
   10146 
   10147 		/* Reset the PHY */
   10148 		wm_gmii_reset(sc);
   10149 		delay(5*1000);
   10150 	}
   10151 
   10152 	/* Disable GigE link negotiation */
   10153 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   10154 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   10155 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   10156 
   10157 	/*
   10158 	 * Call gig speed drop workaround on Gig disable before accessing
   10159 	 * any PHY registers.
   10160 	 */
   10161 	wm_gig_downshift_workaround_ich8lan(sc);
   10162 
   10163 out:
   10164 	return;
   10165 }
   10166 
    10167 /* Workaround for the "WOL from S5 stops working" issue */
   10168 static void
   10169 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   10170 {
   10171 	uint16_t kmrn_reg;
   10172 
   10173 	/* Only for igp3 */
   10174 	if (sc->sc_phytype == WMPHY_IGP_3) {
   10175 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   10176 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   10177 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   10178 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   10179 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   10180 	}
   10181 }
   10182 
   10183 /*
   10184  * Workaround for pch's PHYs
   10185  * XXX should be moved to new PHY driver?
   10186  */
   10187 static void
   10188 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   10189 {
   10190 	if (sc->sc_phytype == WMPHY_82577)
   10191 		wm_set_mdio_slow_mode_hv(sc);
   10192 
   10193 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   10194 
   10195 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   10196 
   10197 	/* 82578 */
   10198 	if (sc->sc_phytype == WMPHY_82578) {
   10199 		/* PCH rev. < 3 */
   10200 		if (sc->sc_rev < 3) {
   10201 			/* XXX 6 bit shift? Why? Is it page2? */
   10202 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   10203 			    0x66c0);
   10204 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   10205 			    0xffff);
   10206 		}
   10207 
   10208 		/* XXX phy rev. < 2 */
   10209 	}
   10210 
   10211 	/* Select page 0 */
   10212 
   10213 	/* XXX acquire semaphore */
   10214 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   10215 	/* XXX release semaphore */
   10216 
   10217 	/*
   10218 	 * Configure the K1 Si workaround during phy reset assuming there is
   10219 	 * link so that it disables K1 if link is in 1Gbps.
   10220 	 */
   10221 	wm_k1_gig_workaround_hv(sc, 1);
   10222 }
   10223 
   10224 static void
   10225 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   10226 {
   10227 
   10228 	wm_set_mdio_slow_mode_hv(sc);
   10229 }
   10230 
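/*
 * wm_k1_gig_workaround_hv:
 *
 *	K1 is a power state of the MAC/PHY interconnect.  It apparently
 *	must not be enabled while a 1Gbps link is up, so force it off
 *	when a link comes up and restore the NVM default on link down.
 */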
   10231 static void
   10232 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   10233 {
   10234 	int k1_enable = sc->sc_nvm_k1_enabled;
   10235 
   10236 	/* XXX acquire semaphore */
   10237 
   10238 	if (link) {
   10239 		k1_enable = 0;
   10240 
   10241 		/* Link stall fix for link up */
   10242 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   10243 	} else {
   10244 		/* Link stall fix for link down */
   10245 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   10246 	}
   10247 
   10248 	wm_configure_k1_ich8lan(sc, k1_enable);
   10249 
   10250 	/* XXX release semaphore */
   10251 }
   10252 
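/*
 * wm_set_mdio_slow_mode_hv:
 *
 *	Put the PHY's MDIO interface into slow mode.
 */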
   10253 static void
   10254 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   10255 {
   10256 	uint32_t reg;
   10257 
   10258 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   10259 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   10260 	    reg | HV_KMRN_MDIO_SLOW);
   10261 }
   10262 
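/*
 * wm_configure_k1_ich8lan:
 *
 *	Write the K1 enable bit to the Kumeran K1 config register and
 *	then briefly force the MAC speed (FRCSPD/SPD_BYPS), presumably
 *	so that the new setting is latched, before restoring CTRL and
 *	CTRL_EXT.
 */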
   10263 static void
   10264 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   10265 {
   10266 	uint32_t ctrl, ctrl_ext, tmp;
   10267 	uint16_t kmrn_reg;
   10268 
   10269 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   10270 
   10271 	if (k1_enable)
   10272 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   10273 	else
   10274 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   10275 
   10276 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   10277 
   10278 	delay(20);
   10279 
   10280 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10281 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10282 
   10283 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   10284 	tmp |= CTRL_FRCSPD;
   10285 
   10286 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   10287 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   10288 	CSR_WRITE_FLUSH(sc);
   10289 	delay(20);
   10290 
   10291 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   10292 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10293 	CSR_WRITE_FLUSH(sc);
   10294 	delay(20);
   10295 }
   10296 
    10297 /* Special case - the 82575 needs manual init after a reset ... */
   10298 static void
   10299 wm_reset_init_script_82575(struct wm_softc *sc)
   10300 {
   10301 	/*
    10302 	 * Remark: this is untested code - we have no board without EEPROM.
    10303 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   10304 	 */
   10305 
   10306 	/* SerDes configuration via SERDESCTRL */
   10307 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   10308 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   10309 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   10310 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   10311 
   10312 	/* CCM configuration via CCMCTL register */
   10313 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   10314 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   10315 
   10316 	/* PCIe lanes configuration */
   10317 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   10318 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   10319 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   10320 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   10321 
   10322 	/* PCIe PLL Configuration */
   10323 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   10324 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   10325 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   10326 }
   10327 
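/*
 * wm_reset_mdicnfg_82580:
 *
 *	Restore the MDIO destination bits in MDICNFG from the
 *	CFG3_PORTA word in the NVM; only relevant when SGMII is in use.
 */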
   10328 static void
   10329 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   10330 {
   10331 	uint32_t reg;
   10332 	uint16_t nvmword;
   10333 	int rv;
   10334 
   10335 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10336 		return;
   10337 
   10338 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   10339 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   10340 	if (rv != 0) {
   10341 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   10342 		    __func__);
   10343 		return;
   10344 	}
   10345 
   10346 	reg = CSR_READ(sc, WMREG_MDICNFG);
   10347 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   10348 		reg |= MDICNFG_DEST;
   10349 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   10350 		reg |= MDICNFG_COM_MDIO;
   10351 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   10352 }
   10353 
   10354 /*
   10355  * I210 Errata 25 and I211 Errata 10
   10356  * Slow System Clock.
   10357  */
   10358 static void
   10359 wm_pll_workaround_i210(struct wm_softc *sc)
   10360 {
   10361 	uint32_t mdicnfg, wuc;
   10362 	uint32_t reg;
   10363 	pcireg_t pcireg;
    10364 	int pmreg;
   10365 	uint16_t nvmword, tmp_nvmword;
   10366 	int phyval;
   10367 	bool wa_done = false;
   10368 	int i;
   10369 
   10370 	/* Save WUC and MDICNFG registers */
   10371 	wuc = CSR_READ(sc, WMREG_WUC);
   10372 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   10373 
   10374 	reg = mdicnfg & ~MDICNFG_DEST;
   10375 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   10376 
   10377 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   10378 		nvmword = INVM_DEFAULT_AL;
   10379 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   10380 
   10381 	/* Get Power Management cap offset */
   10382 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   10383 		&pmreg, NULL) == 0)
   10384 		return;
   10385 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   10386 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   10387 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   10388 
   10389 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   10390 			break; /* OK */
   10391 		}
   10392 
   10393 		wa_done = true;
   10394 		/* Directly reset the internal PHY */
   10395 		reg = CSR_READ(sc, WMREG_CTRL);
   10396 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   10397 
   10398 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10399 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   10400 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10401 
   10402 		CSR_WRITE(sc, WMREG_WUC, 0);
   10403 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   10404 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   10405 
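		/*
		 * Bounce the device through D3hot and back to D0.
		 * Presumably this makes the system clock PLL retry the
		 * lock with the patched autoload word.
		 */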
   10406 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   10407 		    pmreg + PCI_PMCSR);
   10408 		pcireg |= PCI_PMCSR_STATE_D3;
   10409 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   10410 		    pmreg + PCI_PMCSR, pcireg);
   10411 		delay(1000);
   10412 		pcireg &= ~PCI_PMCSR_STATE_D3;
   10413 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   10414 		    pmreg + PCI_PMCSR, pcireg);
   10415 
   10416 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   10417 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   10418 
   10419 		/* Restore WUC register */
   10420 		CSR_WRITE(sc, WMREG_WUC, wuc);
   10421 	}
   10422 
   10423 	/* Restore MDICNFG setting */
   10424 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   10425 	if (wa_done)
   10426 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   10427 }
   10428