/*	$NetBSD: if_wm.c,v 1.332 2015/06/08 03:45:19 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.332 2015/06/08 03:45:19 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

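/*
 * Illustrative sketch (not compiled in): the power-of-two ring macros
 * above wrap an index with a mask instead of a modulo.  With
 * WM_NTXDESC(sc) == 4096 the mask is 0xfff, so stepping past the last
 * descriptor lands back on slot 0 without a division or a branch.
 */
#if 0
	int idx = WM_NTXDESC(sc) - 1;	/* 4095 on >= 82544 */
	idx = WM_NEXTTX(sc, idx);	/* (4095 + 1) & 0xfff == 0 */
#endif
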
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

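/*
 * Worked example of the sizing above: a 9014-byte jumbo frame at 2048
 * bytes (MCLBYTES) per buffer spans howmany(9014, 2048) == 5
 * descriptors, so the 256-entry ring holds roughly 256 / 5 ~= 51
 * in-flight jumbo packets.
 */
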
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

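/*
 * Sketch of how the offset macros above are used: because the whole
 * clump maps to one DMA segment, the bus address of any descriptor is
 * just the segment base plus its offsetof() within the clump (see the
 * WM_CDTXADDR()/WM_CDRXADDR() macros further below).
 */
#if 0
	bus_addr_t txd = sc->sc_cddma + WM_CDTXOFF(x);	/* Tx desc x */
	bus_addr_t rxd = sc->sc_cddma + WM_CDRXOFF(x);	/* Rx desc x */
#endif
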
/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported MAC ctrl frames */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))

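/*
 * Usage sketch for the locking macros above (hypothetical caller; the
 * real ones are wm_start(), wm_rxintr() and friends below).  Note the
 * macros deliberately tolerate NULL lock pointers, collapsing to
 * no-ops/true, so non-MPSAFE configurations need no locks at all.
 */
#if 0
	WM_TX_LOCK(sc);
	KASSERT(WM_TX_LOCKED(sc));
	/* ... modify Tx ring state ... */
	WM_TX_UNLOCK(sc);
#endif
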
#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

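/*
 * Sketch of the posted-write flush idiom behind CSR_WRITE_FLUSH():
 * PCI writes may be posted in bridges, so a read of any register
 * (STATUS here) forces them out to the device.  Hypothetical example:
 */
#if 0
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);	/* mask all interrupts */
	CSR_WRITE_FLUSH(sc);			/* push the write out */
#endif
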
#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

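/*
 * Wrap example for WM_CDTXSYNC() above: on a 256-entry ring, syncing
 * 4 descriptors starting at index 254 issues two bus_dmamap_sync()
 * calls, one covering descriptors 254-255 and one covering 0-1.
 */
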
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

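/*
 * Worked example of the 2-byte "scoot" in WM_INIT_RXDESC(): with
 * sc_align_tweak == 2 the 14-byte Ethernet header occupies buffer
 * offsets 2-15, so the IP header starts at offset 16, a 4-byte
 * boundary.  With the tweak forced to 0 (oversized frames on
 * strict-alignment machines) the IP header lands at offset 14 and
 * the upper layers must copy it into alignment instead.
 */
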
/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Reading with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
   1279 
   1280 #if 0 /* Not currently used */
   1281 static inline uint32_t
   1282 wm_io_read(struct wm_softc *sc, int reg)
   1283 {
   1284 
   1285 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1286 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1287 }
   1288 #endif
   1289 
   1290 static inline void
   1291 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1292 {
   1293 
   1294 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1295 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1296 }
   1297 
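/*
 * Write one 8-bit sub-register of an i82575 controller register: the
 * data byte and sub-register offset are packed into a single 32-bit
 * write, and the register is then polled (up to SCTL_CTL_POLL_TIMEOUT
 * iterations, 5us apart) until the hardware sets the READY bit.
 */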
   1298 static inline void
   1299 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1300     uint32_t data)
   1301 {
   1302 	uint32_t regval;
   1303 	int i;
   1304 
   1305 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1306 
   1307 	CSR_WRITE(sc, reg, regval);
   1308 
   1309 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1310 		delay(5);
   1311 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1312 			break;
   1313 	}
   1314 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1315 		aprint_error("%s: WARNING:"
   1316 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1317 		    device_xname(sc->sc_dev), reg);
   1318 	}
   1319 }
   1320 
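/*
 * Load a descriptor address field: the bus address is split into
 * little-endian low and high 32-bit halves, as the hardware expects.
 * The high half is nonzero only when 64-bit bus addresses are in use.
 */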
   1321 static inline void
   1322 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1323 {
   1324 	wa->wa_low = htole32(v & 0xffffffffU);
   1325 	if (sizeof(bus_addr_t) == 8)
   1326 		wa->wa_high = htole32((uint64_t) v >> 32);
   1327 	else
   1328 		wa->wa_high = 0;
   1329 }
   1330 
   1331 /*
   1332  * Device driver interface functions and commonly used functions.
   1333  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1334  */
   1335 
/* Look up a device in the supported device table */
   1337 static const struct wm_product *
   1338 wm_lookup(const struct pci_attach_args *pa)
   1339 {
   1340 	const struct wm_product *wmp;
   1341 
   1342 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1343 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1344 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1345 			return wmp;
   1346 	}
   1347 	return NULL;
   1348 }
   1349 
   1350 /* The match function (ca_match) */
   1351 static int
   1352 wm_match(device_t parent, cfdata_t cf, void *aux)
   1353 {
   1354 	struct pci_attach_args *pa = aux;
   1355 
   1356 	if (wm_lookup(pa) != NULL)
   1357 		return 1;
   1358 
   1359 	return 0;
   1360 }
   1361 
   1362 /* The attach function (ca_attach) */
   1363 static void
   1364 wm_attach(device_t parent, device_t self, void *aux)
   1365 {
   1366 	struct wm_softc *sc = device_private(self);
   1367 	struct pci_attach_args *pa = aux;
   1368 	prop_dictionary_t dict;
   1369 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1370 	pci_chipset_tag_t pc = pa->pa_pc;
   1371 	pci_intr_handle_t ih;
   1372 	const char *intrstr = NULL;
   1373 	const char *eetype, *xname;
   1374 	bus_space_tag_t memt;
   1375 	bus_space_handle_t memh;
   1376 	bus_size_t memsize;
   1377 	int memh_valid;
   1378 	int i, error;
   1379 	const struct wm_product *wmp;
   1380 	prop_data_t ea;
   1381 	prop_number_t pn;
   1382 	uint8_t enaddr[ETHER_ADDR_LEN];
   1383 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1384 	pcireg_t preg, memtype;
   1385 	uint16_t eeprom_data, apme_mask;
   1386 	bool force_clear_smbi;
   1387 	uint32_t link_mode;
   1388 	uint32_t reg;
   1389 	char intrbuf[PCI_INTRSTR_LEN];
   1390 
   1391 	sc->sc_dev = self;
   1392 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1393 	sc->sc_stopping = false;
   1394 
   1395 	wmp = wm_lookup(pa);
   1396 #ifdef DIAGNOSTIC
   1397 	if (wmp == NULL) {
   1398 		printf("\n");
   1399 		panic("wm_attach: impossible");
   1400 	}
   1401 #endif
   1402 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1403 
   1404 	sc->sc_pc = pa->pa_pc;
   1405 	sc->sc_pcitag = pa->pa_tag;
   1406 
   1407 	if (pci_dma64_available(pa))
   1408 		sc->sc_dmat = pa->pa_dmat64;
   1409 	else
   1410 		sc->sc_dmat = pa->pa_dmat;
   1411 
   1412 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1413 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1414 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1415 
   1416 	sc->sc_type = wmp->wmp_type;
   1417 	if (sc->sc_type < WM_T_82543) {
   1418 		if (sc->sc_rev < 2) {
   1419 			aprint_error_dev(sc->sc_dev,
   1420 			    "i82542 must be at least rev. 2\n");
   1421 			return;
   1422 		}
   1423 		if (sc->sc_rev < 3)
   1424 			sc->sc_type = WM_T_82542_2_0;
   1425 	}
   1426 
   1427 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1428 	    || (sc->sc_type == WM_T_82580)
   1429 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1430 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1431 		sc->sc_flags |= WM_F_NEWQUEUE;
   1432 
   1433 	/* Set device properties (mactype) */
   1434 	dict = device_properties(sc->sc_dev);
   1435 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1436 
   1437 	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
   1440 	 */
   1441 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1442 	switch (memtype) {
   1443 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1444 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1445 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1446 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1447 		break;
   1448 	default:
   1449 		memh_valid = 0;
   1450 		break;
   1451 	}
   1452 
   1453 	if (memh_valid) {
   1454 		sc->sc_st = memt;
   1455 		sc->sc_sh = memh;
   1456 		sc->sc_ss = memsize;
   1457 	} else {
   1458 		aprint_error_dev(sc->sc_dev,
   1459 		    "unable to map device registers\n");
   1460 		return;
   1461 	}
   1462 
   1463 	/*
   1464 	 * In addition, i82544 and later support I/O mapped indirect
   1465 	 * register access.  It is not desirable (nor supported in
   1466 	 * this driver) to use it for normal operation, though it is
   1467 	 * required to work around bugs in some chip versions.
   1468 	 */
   1469 	if (sc->sc_type >= WM_T_82544) {
   1470 		/* First we have to find the I/O BAR. */
   1471 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1472 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1473 			if (memtype == PCI_MAPREG_TYPE_IO)
   1474 				break;
   1475 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1476 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1477 				i += 4;	/* skip high bits, too */
   1478 		}
   1479 		if (i < PCI_MAPREG_END) {
   1480 			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's fine, because those chips don't have the
			 * bug this mapping works around in the first place.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it has never
			 * been configured.
   1489 			 */
   1490 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1491 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1492 				aprint_error_dev(sc->sc_dev,
   1493 				    "WARNING: I/O BAR at zero.\n");
   1494 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1495 					0, &sc->sc_iot, &sc->sc_ioh,
   1496 					NULL, &sc->sc_ios) == 0) {
   1497 				sc->sc_flags |= WM_F_IOH_VALID;
   1498 			} else {
   1499 				aprint_error_dev(sc->sc_dev,
   1500 				    "WARNING: unable to map I/O space\n");
   1501 			}
   1502 		}
   1503 
   1504 	}
   1505 
   1506 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1507 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1508 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1509 	if (sc->sc_type < WM_T_82542_2_1)
   1510 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1511 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1512 
   1513 	/* power up chip */
   1514 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1515 	    NULL)) && error != EOPNOTSUPP) {
   1516 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1517 		return;
   1518 	}
   1519 
   1520 	/*
   1521 	 * Map and establish our interrupt.
   1522 	 */
   1523 	if (pci_intr_map(pa, &ih)) {
   1524 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
   1525 		return;
   1526 	}
   1527 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
   1528 #ifdef WM_MPSAFE
   1529 	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
   1530 #endif
   1531 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
   1532 	if (sc->sc_ih == NULL) {
   1533 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
   1534 		if (intrstr != NULL)
   1535 			aprint_error(" at %s", intrstr);
   1536 		aprint_error("\n");
   1537 		return;
   1538 	}
   1539 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   1540 
   1541 	/*
   1542 	 * Check the function ID (unit number of the chip).
   1543 	 */
   1544 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1545 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1546 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1547 	    || (sc->sc_type == WM_T_82580)
   1548 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1549 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1550 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1551 	else
   1552 		sc->sc_funcid = 0;
   1553 
   1554 	/*
   1555 	 * Determine a few things about the bus we're connected to.
   1556 	 */
   1557 	if (sc->sc_type < WM_T_82543) {
   1558 		/* We don't really know the bus characteristics here. */
   1559 		sc->sc_bus_speed = 33;
   1560 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1561 		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
   1564 		 */
   1565 		sc->sc_flags |= WM_F_CSA;
   1566 		sc->sc_bus_speed = 66;
   1567 		aprint_verbose_dev(sc->sc_dev,
   1568 		    "Communication Streaming Architecture\n");
   1569 		if (sc->sc_type == WM_T_82547) {
   1570 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1571 			callout_setfunc(&sc->sc_txfifo_ch,
   1572 					wm_82547_txfifo_stall, sc);
   1573 			aprint_verbose_dev(sc->sc_dev,
   1574 			    "using 82547 Tx FIFO stall work-around\n");
   1575 		}
   1576 	} else if (sc->sc_type >= WM_T_82571) {
   1577 		sc->sc_flags |= WM_F_PCIE;
   1578 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1579 		    && (sc->sc_type != WM_T_ICH10)
   1580 		    && (sc->sc_type != WM_T_PCH)
   1581 		    && (sc->sc_type != WM_T_PCH2)
   1582 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1583 			/* ICH* and PCH* have no PCIe capability registers */
   1584 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1585 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1586 				NULL) == 0)
   1587 				aprint_error_dev(sc->sc_dev,
   1588 				    "unable to find PCIe capability\n");
   1589 		}
   1590 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1591 	} else {
   1592 		reg = CSR_READ(sc, WMREG_STATUS);
   1593 		if (reg & STATUS_BUS64)
   1594 			sc->sc_flags |= WM_F_BUS64;
   1595 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1596 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1597 
   1598 			sc->sc_flags |= WM_F_PCIX;
   1599 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1600 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1601 				aprint_error_dev(sc->sc_dev,
   1602 				    "unable to find PCIX capability\n");
   1603 			else if (sc->sc_type != WM_T_82545_3 &&
   1604 				 sc->sc_type != WM_T_82546_3) {
   1605 				/*
   1606 				 * Work around a problem caused by the BIOS
   1607 				 * setting the max memory read byte count
   1608 				 * incorrectly.
   1609 				 */
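				/*
				 * Both the command byte-count field and the
				 * status MAXB field encode a size of
				 * 512 << n bytes; the MMRBC is clamped below
				 * to the largest size the device reports it
				 * can handle.
				 */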
   1610 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1611 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1612 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1613 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1614 
   1615 				bytecnt =
   1616 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1617 				    PCIX_CMD_BYTECNT_SHIFT;
   1618 				maxb =
   1619 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1620 				    PCIX_STATUS_MAXB_SHIFT;
   1621 				if (bytecnt > maxb) {
   1622 					aprint_verbose_dev(sc->sc_dev,
   1623 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1624 					    512 << bytecnt, 512 << maxb);
   1625 					pcix_cmd = (pcix_cmd &
   1626 					    ~PCIX_CMD_BYTECNT_MASK) |
   1627 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1628 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1629 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1630 					    pcix_cmd);
   1631 				}
   1632 			}
   1633 		}
   1634 		/*
   1635 		 * The quad port adapter is special; it has a PCIX-PCIX
   1636 		 * bridge on the board, and can run the secondary bus at
   1637 		 * a higher speed.
   1638 		 */
   1639 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1640 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1641 								      : 66;
   1642 		} else if (sc->sc_flags & WM_F_PCIX) {
   1643 			switch (reg & STATUS_PCIXSPD_MASK) {
   1644 			case STATUS_PCIXSPD_50_66:
   1645 				sc->sc_bus_speed = 66;
   1646 				break;
   1647 			case STATUS_PCIXSPD_66_100:
   1648 				sc->sc_bus_speed = 100;
   1649 				break;
   1650 			case STATUS_PCIXSPD_100_133:
   1651 				sc->sc_bus_speed = 133;
   1652 				break;
   1653 			default:
   1654 				aprint_error_dev(sc->sc_dev,
   1655 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1656 				    reg & STATUS_PCIXSPD_MASK);
   1657 				sc->sc_bus_speed = 66;
   1658 				break;
   1659 			}
   1660 		} else
   1661 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1662 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1663 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1664 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1665 	}
   1666 
   1667 	/*
   1668 	 * Allocate the control data structures, and create and load the
   1669 	 * DMA map for it.
   1670 	 *
   1671 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   1672 	 * memory.  So must Rx descriptors.  We simplify by allocating
   1673 	 * both sets within the same 4G segment.
   1674 	 */
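	/*
	 * The usual four-step bus_dma(9) sequence follows: allocate
	 * the raw memory, map it into kernel VA, create a DMA map, and
	 * load the map.  The 4G constraint above is enforced by passing
	 * 0x100000000 as the boundary argument to bus_dmamem_alloc(),
	 * which guarantees that the allocation never crosses a 4GB
	 * boundary.
	 */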
   1675 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
   1676 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
   1677 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
   1678 	    sizeof(struct wm_control_data_82542) :
   1679 	    sizeof(struct wm_control_data_82544);
   1680 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
   1681 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
   1682 		    &sc->sc_cd_rseg, 0)) != 0) {
   1683 		aprint_error_dev(sc->sc_dev,
   1684 		    "unable to allocate control data, error = %d\n",
   1685 		    error);
   1686 		goto fail_0;
   1687 	}
   1688 
   1689 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
   1690 		    sc->sc_cd_rseg, sc->sc_cd_size,
   1691 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
   1692 		aprint_error_dev(sc->sc_dev,
   1693 		    "unable to map control data, error = %d\n", error);
   1694 		goto fail_1;
   1695 	}
   1696 
   1697 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
   1698 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
   1699 		aprint_error_dev(sc->sc_dev,
   1700 		    "unable to create control data DMA map, error = %d\n",
   1701 		    error);
   1702 		goto fail_2;
   1703 	}
   1704 
   1705 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
   1706 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
   1707 		aprint_error_dev(sc->sc_dev,
   1708 		    "unable to load control data DMA map, error = %d\n",
   1709 		    error);
   1710 		goto fail_3;
   1711 	}
   1712 
   1713 	/* Create the transmit buffer DMA maps. */
   1714 	WM_TXQUEUELEN(sc) =
   1715 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   1716 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   1717 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1718 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   1719 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   1720 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
   1721 			aprint_error_dev(sc->sc_dev,
   1722 			    "unable to create Tx DMA map %d, error = %d\n",
   1723 			    i, error);
   1724 			goto fail_4;
   1725 		}
   1726 	}
   1727 
   1728 	/* Create the receive buffer DMA maps. */
   1729 	for (i = 0; i < WM_NRXDESC; i++) {
   1730 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   1731 			    MCLBYTES, 0, 0,
   1732 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
   1733 			aprint_error_dev(sc->sc_dev,
   1734 			    "unable to create Rx DMA map %d error = %d\n",
   1735 			    i, error);
   1736 			goto fail_5;
   1737 		}
   1738 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
   1739 	}
   1740 
   1741 	/* clear interesting stat counters */
   1742 	CSR_READ(sc, WMREG_COLC);
   1743 	CSR_READ(sc, WMREG_RXERRC);
   1744 
	/* Switch PHY control from SMBus to PCIe */
   1746 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1747 	    || (sc->sc_type == WM_T_PCH_LPT))
   1748 		wm_smbustopci(sc);
   1749 
   1750 	/* Reset the chip to a known state. */
   1751 	wm_reset(sc);
   1752 
   1753 	/* Get some information about the EEPROM. */
   1754 	switch (sc->sc_type) {
   1755 	case WM_T_82542_2_0:
   1756 	case WM_T_82542_2_1:
   1757 	case WM_T_82543:
   1758 	case WM_T_82544:
   1759 		/* Microwire */
   1760 		sc->sc_nvm_wordsize = 64;
   1761 		sc->sc_nvm_addrbits = 6;
   1762 		break;
   1763 	case WM_T_82540:
   1764 	case WM_T_82545:
   1765 	case WM_T_82545_3:
   1766 	case WM_T_82546:
   1767 	case WM_T_82546_3:
   1768 		/* Microwire */
   1769 		reg = CSR_READ(sc, WMREG_EECD);
   1770 		if (reg & EECD_EE_SIZE) {
   1771 			sc->sc_nvm_wordsize = 256;
   1772 			sc->sc_nvm_addrbits = 8;
   1773 		} else {
   1774 			sc->sc_nvm_wordsize = 64;
   1775 			sc->sc_nvm_addrbits = 6;
   1776 		}
   1777 		sc->sc_flags |= WM_F_LOCK_EECD;
   1778 		break;
   1779 	case WM_T_82541:
   1780 	case WM_T_82541_2:
   1781 	case WM_T_82547:
   1782 	case WM_T_82547_2:
   1783 		sc->sc_flags |= WM_F_LOCK_EECD;
   1784 		reg = CSR_READ(sc, WMREG_EECD);
   1785 		if (reg & EECD_EE_TYPE) {
   1786 			/* SPI */
   1787 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1788 			wm_nvm_set_addrbits_size_eecd(sc);
   1789 		} else {
   1790 			/* Microwire */
   1791 			if ((reg & EECD_EE_ABITS) != 0) {
   1792 				sc->sc_nvm_wordsize = 256;
   1793 				sc->sc_nvm_addrbits = 8;
   1794 			} else {
   1795 				sc->sc_nvm_wordsize = 64;
   1796 				sc->sc_nvm_addrbits = 6;
   1797 			}
   1798 		}
   1799 		break;
   1800 	case WM_T_82571:
   1801 	case WM_T_82572:
   1802 		/* SPI */
   1803 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1804 		wm_nvm_set_addrbits_size_eecd(sc);
   1805 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1806 		break;
   1807 	case WM_T_82573:
   1808 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1809 		/* FALLTHROUGH */
   1810 	case WM_T_82574:
   1811 	case WM_T_82583:
   1812 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1813 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1814 			sc->sc_nvm_wordsize = 2048;
   1815 		} else {
   1816 			/* SPI */
   1817 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1818 			wm_nvm_set_addrbits_size_eecd(sc);
   1819 		}
   1820 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1821 		break;
   1822 	case WM_T_82575:
   1823 	case WM_T_82576:
   1824 	case WM_T_82580:
   1825 	case WM_T_I350:
   1826 	case WM_T_I354:
   1827 	case WM_T_80003:
   1828 		/* SPI */
   1829 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1830 		wm_nvm_set_addrbits_size_eecd(sc);
   1831 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1832 		    | WM_F_LOCK_SWSM;
   1833 		break;
   1834 	case WM_T_ICH8:
   1835 	case WM_T_ICH9:
   1836 	case WM_T_ICH10:
   1837 	case WM_T_PCH:
   1838 	case WM_T_PCH2:
   1839 	case WM_T_PCH_LPT:
   1840 		/* FLASH */
   1841 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1842 		sc->sc_nvm_wordsize = 2048;
   1843 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1844 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1845 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
   1846 			aprint_error_dev(sc->sc_dev,
   1847 			    "can't map FLASH registers\n");
   1848 			goto fail_5;
   1849 		}
   1850 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1851 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1852 						ICH_FLASH_SECTOR_SIZE;
   1853 		sc->sc_ich8_flash_bank_size =
   1854 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1855 		sc->sc_ich8_flash_bank_size -=
   1856 		    (reg & ICH_GFPREG_BASE_MASK);
   1857 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1858 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1859 		break;
   1860 	case WM_T_I210:
   1861 	case WM_T_I211:
   1862 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1863 			wm_nvm_set_addrbits_size_eecd(sc);
   1864 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1865 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1866 		} else {
   1867 			sc->sc_nvm_wordsize = INVM_SIZE;
   1868 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1869 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1870 		}
   1871 		break;
   1872 	default:
   1873 		break;
   1874 	}
   1875 
   1876 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1877 	switch (sc->sc_type) {
   1878 	case WM_T_82571:
   1879 	case WM_T_82572:
   1880 		reg = CSR_READ(sc, WMREG_SWSM2);
   1881 		if ((reg & SWSM2_LOCK) == 0) {
   1882 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1883 			force_clear_smbi = true;
   1884 		} else
   1885 			force_clear_smbi = false;
   1886 		break;
   1887 	case WM_T_82573:
   1888 	case WM_T_82574:
   1889 	case WM_T_82583:
   1890 		force_clear_smbi = true;
   1891 		break;
   1892 	default:
   1893 		force_clear_smbi = false;
   1894 		break;
   1895 	}
   1896 	if (force_clear_smbi) {
   1897 		reg = CSR_READ(sc, WMREG_SWSM);
   1898 		if ((reg & SWSM_SMBI) != 0)
   1899 			aprint_error_dev(sc->sc_dev,
   1900 			    "Please update the Bootagent\n");
   1901 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1902 	}
   1903 
   1904 	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
   1907 	 * that no EEPROM is attached.
   1908 	 */
   1909 	/*
   1910 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1911 	 * this for later, so we can fail future reads from the EEPROM.
   1912 	 */
   1913 	if (wm_nvm_validate_checksum(sc)) {
   1914 		/*
		 * Retry the read, because some PCI-e parts fail the
		 * first check due to the link being in a sleep state.
   1917 		 */
   1918 		if (wm_nvm_validate_checksum(sc))
   1919 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1920 	}
   1921 
   1922 	/* Set device properties (macflags) */
   1923 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1924 
   1925 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1926 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   1927 	else {
   1928 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   1929 		    sc->sc_nvm_wordsize);
   1930 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   1931 			aprint_verbose("iNVM");
   1932 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   1933 			aprint_verbose("FLASH(HW)");
   1934 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   1935 			aprint_verbose("FLASH");
   1936 		else {
   1937 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   1938 				eetype = "SPI";
   1939 			else
   1940 				eetype = "MicroWire";
   1941 			aprint_verbose("(%d address bits) %s EEPROM",
   1942 			    sc->sc_nvm_addrbits, eetype);
   1943 		}
   1944 	}
   1945 	wm_nvm_version(sc);
   1946 	aprint_verbose("\n");
   1947 
   1948 	/* Check for I21[01] PLL workaround */
	if (sc->sc_type == WM_T_I210) {
		if (!wm_nvm_get_flash_presence_i210(sc)) {
			/* FLASH-less (iNVM) devices always need it */
			sc->sc_flags |= WM_F_PLL_WA_I210;
		} else if ((sc->sc_nvm_ver_major < 3)
		    || ((sc->sc_nvm_ver_major == 3)
			&& (sc->sc_nvm_ver_minor < 25))) {
			/* NVM image release 3.25 has a workaround */
			aprint_verbose_dev(sc->sc_dev,
			    "ROM image version %d.%d is older than 3.25\n",
			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
			sc->sc_flags |= WM_F_PLL_WA_I210;
		}
	}
   1964 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   1965 		wm_pll_workaround_i210(sc);
   1966 
   1967 	switch (sc->sc_type) {
   1968 	case WM_T_82571:
   1969 	case WM_T_82572:
   1970 	case WM_T_82573:
   1971 	case WM_T_82574:
   1972 	case WM_T_82583:
   1973 	case WM_T_80003:
   1974 	case WM_T_ICH8:
   1975 	case WM_T_ICH9:
   1976 	case WM_T_ICH10:
   1977 	case WM_T_PCH:
   1978 	case WM_T_PCH2:
   1979 	case WM_T_PCH_LPT:
   1980 		if (wm_check_mng_mode(sc) != 0)
   1981 			wm_get_hw_control(sc);
   1982 		break;
   1983 	default:
   1984 		break;
   1985 	}
   1986 	wm_get_wakeup(sc);
   1987 	/*
	 * Read the Ethernet address from the EEPROM, unless it was
	 * already found in the device properties.
   1990 	 */
   1991 	ea = prop_dictionary_get(dict, "mac-address");
   1992 	if (ea != NULL) {
   1993 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   1994 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   1995 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   1996 	} else {
   1997 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   1998 			aprint_error_dev(sc->sc_dev,
   1999 			    "unable to read Ethernet address\n");
   2000 			goto fail_5;
   2001 		}
   2002 	}
   2003 
   2004 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2005 	    ether_sprintf(enaddr));
   2006 
   2007 	/*
   2008 	 * Read the config info from the EEPROM, and set up various
   2009 	 * bits in the control registers based on their contents.
   2010 	 */
   2011 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2012 	if (pn != NULL) {
   2013 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2014 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2015 	} else {
   2016 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2017 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2018 			goto fail_5;
   2019 		}
   2020 	}
   2021 
   2022 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2023 	if (pn != NULL) {
   2024 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2025 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2026 	} else {
   2027 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2028 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2029 			goto fail_5;
   2030 		}
   2031 	}
   2032 
   2033 	/* check for WM_F_WOL */
   2034 	switch (sc->sc_type) {
   2035 	case WM_T_82542_2_0:
   2036 	case WM_T_82542_2_1:
   2037 	case WM_T_82543:
   2038 		/* dummy? */
   2039 		eeprom_data = 0;
   2040 		apme_mask = NVM_CFG3_APME;
   2041 		break;
   2042 	case WM_T_82544:
   2043 		apme_mask = NVM_CFG2_82544_APM_EN;
   2044 		eeprom_data = cfg2;
   2045 		break;
   2046 	case WM_T_82546:
   2047 	case WM_T_82546_3:
   2048 	case WM_T_82571:
   2049 	case WM_T_82572:
   2050 	case WM_T_82573:
   2051 	case WM_T_82574:
   2052 	case WM_T_82583:
   2053 	case WM_T_80003:
   2054 	default:
   2055 		apme_mask = NVM_CFG3_APME;
   2056 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2057 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2058 		break;
   2059 	case WM_T_82575:
   2060 	case WM_T_82576:
   2061 	case WM_T_82580:
   2062 	case WM_T_I350:
   2063 	case WM_T_I354: /* XXX ok? */
   2064 	case WM_T_ICH8:
   2065 	case WM_T_ICH9:
   2066 	case WM_T_ICH10:
   2067 	case WM_T_PCH:
   2068 	case WM_T_PCH2:
   2069 	case WM_T_PCH_LPT:
   2070 		/* XXX The funcid should be checked on some devices */
   2071 		apme_mask = WUC_APME;
   2072 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2073 		break;
   2074 	}
   2075 
	/* Set the WM_F_WOL flag based on the EEPROM/WUC data read above */
   2077 	if ((eeprom_data & apme_mask) != 0)
   2078 		sc->sc_flags |= WM_F_WOL;
   2079 #ifdef WM_DEBUG
   2080 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2081 		printf("WOL\n");
   2082 #endif
   2083 
   2084 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2085 		/* Check NVM for autonegotiation */
   2086 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2087 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2088 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2089 		}
   2090 	}
   2091 
   2092 	/*
	 * XXX need special handling for some multiple-port cards
	 * to disable a particular port.
   2095 	 */
   2096 
   2097 	if (sc->sc_type >= WM_T_82544) {
   2098 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2099 		if (pn != NULL) {
   2100 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2101 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2102 		} else {
   2103 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2104 				aprint_error_dev(sc->sc_dev,
   2105 				    "unable to read SWDPIN\n");
   2106 				goto fail_5;
   2107 			}
   2108 		}
   2109 	}
   2110 
   2111 	if (cfg1 & NVM_CFG1_ILOS)
   2112 		sc->sc_ctrl |= CTRL_ILOS;
   2113 
   2114 	/*
   2115 	 * XXX
	 * This code isn't correct because pins 2 and 3 are located
	 * at different positions on newer chips.  Check all the datasheets.
	 *
	 * Until this is resolved, apply it only to the 82580 and older chips.
   2120 	 */
   2121 	if (sc->sc_type <= WM_T_82580) {
   2122 		if (sc->sc_type >= WM_T_82544) {
   2123 			sc->sc_ctrl |=
   2124 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2125 			    CTRL_SWDPIO_SHIFT;
   2126 			sc->sc_ctrl |=
   2127 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2128 			    CTRL_SWDPINS_SHIFT;
   2129 		} else {
   2130 			sc->sc_ctrl |=
   2131 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2132 			    CTRL_SWDPIO_SHIFT;
   2133 		}
   2134 	}
   2135 
   2136 	/* XXX For other than 82580? */
   2137 	if (sc->sc_type == WM_T_82580) {
   2138 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2139 		printf("CFG3 = %08x\n", (uint32_t)nvmword);
   2140 		if (nvmword & __BIT(13)) {
   2141 			printf("SET ILOS\n");
   2142 			sc->sc_ctrl |= CTRL_ILOS;
   2143 		}
   2144 	}
   2145 
   2146 #if 0
   2147 	if (sc->sc_type >= WM_T_82544) {
   2148 		if (cfg1 & NVM_CFG1_IPS0)
   2149 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2150 		if (cfg1 & NVM_CFG1_IPS1)
   2151 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2152 		sc->sc_ctrl_ext |=
   2153 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2154 		    CTRL_EXT_SWDPIO_SHIFT;
   2155 		sc->sc_ctrl_ext |=
   2156 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2157 		    CTRL_EXT_SWDPINS_SHIFT;
   2158 	} else {
   2159 		sc->sc_ctrl_ext |=
   2160 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2161 		    CTRL_EXT_SWDPIO_SHIFT;
   2162 	}
   2163 #endif
   2164 
   2165 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2166 #if 0
   2167 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2168 #endif
   2169 
   2170 	/*
   2171 	 * Set up some register offsets that are different between
   2172 	 * the i82542 and the i82543 and later chips.
   2173 	 */
   2174 	if (sc->sc_type < WM_T_82543) {
   2175 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   2176 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   2177 	} else {
   2178 		sc->sc_rdt_reg = WMREG_RDT;
   2179 		sc->sc_tdt_reg = WMREG_TDT;
   2180 	}
   2181 
   2182 	if (sc->sc_type == WM_T_PCH) {
   2183 		uint16_t val;
   2184 
   2185 		/* Save the NVM K1 bit setting */
   2186 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2187 
   2188 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2189 			sc->sc_nvm_k1_enabled = 1;
   2190 		else
   2191 			sc->sc_nvm_k1_enabled = 0;
   2192 	}
   2193 
   2194 	/*
	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2196 	 * media structures accordingly.
   2197 	 */
   2198 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2199 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2200 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2201 	    || sc->sc_type == WM_T_82573
   2202 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2203 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2204 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2205 	} else if (sc->sc_type < WM_T_82543 ||
   2206 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2207 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2208 			aprint_error_dev(sc->sc_dev,
   2209 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2210 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2211 		}
   2212 		wm_tbi_mediainit(sc);
   2213 	} else {
   2214 		switch (sc->sc_type) {
   2215 		case WM_T_82575:
   2216 		case WM_T_82576:
   2217 		case WM_T_82580:
   2218 		case WM_T_I350:
   2219 		case WM_T_I354:
   2220 		case WM_T_I210:
   2221 		case WM_T_I211:
   2222 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2223 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
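			/*
			 * The CTRL_EXT link-mode field selects the MAC/PHY
			 * interconnect: 1000KX maps to SERDES, GMII to
			 * copper, and the SGMII/SERDES cases are told apart
			 * below, partly by probing the SFP module (if any).
			 */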
   2224 			switch (link_mode) {
   2225 			case CTRL_EXT_LINK_MODE_1000KX:
   2226 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2227 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2228 				break;
   2229 			case CTRL_EXT_LINK_MODE_SGMII:
   2230 				if (wm_sgmii_uses_mdio(sc)) {
   2231 					aprint_verbose_dev(sc->sc_dev,
   2232 					    "SGMII(MDIO)\n");
   2233 					sc->sc_flags |= WM_F_SGMII;
   2234 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2235 					break;
   2236 				}
   2237 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2238 				/*FALLTHROUGH*/
   2239 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2240 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2241 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2242 					if (link_mode
   2243 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2244 						sc->sc_mediatype
   2245 						    = WM_MEDIATYPE_COPPER;
   2246 						sc->sc_flags |= WM_F_SGMII;
   2247 					} else {
   2248 						sc->sc_mediatype
   2249 						    = WM_MEDIATYPE_SERDES;
   2250 						aprint_verbose_dev(sc->sc_dev,
   2251 						    "SERDES\n");
   2252 					}
   2253 					break;
   2254 				}
   2255 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2256 					aprint_verbose_dev(sc->sc_dev,
   2257 					    "SERDES\n");
   2258 
   2259 				/* Change current link mode setting */
   2260 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2261 				switch (sc->sc_mediatype) {
   2262 				case WM_MEDIATYPE_COPPER:
   2263 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2264 					break;
   2265 				case WM_MEDIATYPE_SERDES:
   2266 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2267 					break;
   2268 				default:
   2269 					break;
   2270 				}
   2271 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2272 				break;
   2273 			case CTRL_EXT_LINK_MODE_GMII:
   2274 			default:
   2275 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2276 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2277 				break;
   2278 			}
   2279 
   2280 			reg &= ~CTRL_EXT_I2C_ENA;
   2281 			if ((sc->sc_flags & WM_F_SGMII) != 0)
   2282 				reg |= CTRL_EXT_I2C_ENA;
   2283 			else
   2284 				reg &= ~CTRL_EXT_I2C_ENA;
   2285 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2286 
   2287 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2288 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2289 			else
   2290 				wm_tbi_mediainit(sc);
   2291 			break;
   2292 		default:
   2293 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2294 				aprint_error_dev(sc->sc_dev,
   2295 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2296 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2297 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2298 		}
   2299 	}
   2300 
   2301 	ifp = &sc->sc_ethercom.ec_if;
   2302 	xname = device_xname(sc->sc_dev);
   2303 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2304 	ifp->if_softc = sc;
   2305 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2306 	ifp->if_ioctl = wm_ioctl;
   2307 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2308 		ifp->if_start = wm_nq_start;
   2309 	else
   2310 		ifp->if_start = wm_start;
   2311 	ifp->if_watchdog = wm_watchdog;
   2312 	ifp->if_init = wm_init;
   2313 	ifp->if_stop = wm_stop;
   2314 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2315 	IFQ_SET_READY(&ifp->if_snd);
   2316 
   2317 	/* Check for jumbo frame */
   2318 	switch (sc->sc_type) {
   2319 	case WM_T_82573:
   2320 		/* XXX limited to 9234 if ASPM is disabled */
   2321 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2322 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2323 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2324 		break;
   2325 	case WM_T_82571:
   2326 	case WM_T_82572:
   2327 	case WM_T_82574:
   2328 	case WM_T_82575:
   2329 	case WM_T_82576:
   2330 	case WM_T_82580:
   2331 	case WM_T_I350:
   2332 	case WM_T_I354: /* XXXX ok? */
   2333 	case WM_T_I210:
   2334 	case WM_T_I211:
   2335 	case WM_T_80003:
   2336 	case WM_T_ICH9:
   2337 	case WM_T_ICH10:
   2338 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2339 	case WM_T_PCH_LPT:
   2340 		/* XXX limited to 9234 */
   2341 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2342 		break;
   2343 	case WM_T_PCH:
   2344 		/* XXX limited to 4096 */
   2345 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2346 		break;
   2347 	case WM_T_82542_2_0:
   2348 	case WM_T_82542_2_1:
   2349 	case WM_T_82583:
   2350 	case WM_T_ICH8:
   2351 		/* No support for jumbo frame */
   2352 		break;
   2353 	default:
   2354 		/* ETHER_MAX_LEN_JUMBO */
   2355 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2356 		break;
   2357 	}
   2358 
	/* If we're an i82543 or greater, we can support VLANs. */
   2360 	if (sc->sc_type >= WM_T_82543)
   2361 		sc->sc_ethercom.ec_capabilities |=
   2362 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2363 
   2364 	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2366 	 * on i82543 and later.
   2367 	 */
   2368 	if (sc->sc_type >= WM_T_82543) {
   2369 		ifp->if_capabilities |=
   2370 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2371 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2372 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2373 		    IFCAP_CSUM_TCPv6_Tx |
   2374 		    IFCAP_CSUM_UDPv6_Tx;
   2375 	}
   2376 
   2377 	/*
   2378 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2379 	 *
   2380 	 *	82541GI (8086:1076) ... no
   2381 	 *	82572EI (8086:10b9) ... yes
   2382 	 */
   2383 	if (sc->sc_type >= WM_T_82571) {
   2384 		ifp->if_capabilities |=
   2385 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2386 	}
   2387 
   2388 	/*
	 * If we're an i82544 or greater (except i82547), we can do
   2390 	 * TCP segmentation offload.
   2391 	 */
   2392 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2393 		ifp->if_capabilities |= IFCAP_TSOv4;
   2394 	}
   2395 
   2396 	if (sc->sc_type >= WM_T_82571) {
   2397 		ifp->if_capabilities |= IFCAP_TSOv6;
   2398 	}
   2399 
   2400 #ifdef WM_MPSAFE
   2401 	sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2402 	sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2403 #else
   2404 	sc->sc_tx_lock = NULL;
   2405 	sc->sc_rx_lock = NULL;
   2406 #endif
   2407 
   2408 	/* Attach the interface. */
   2409 	if_attach(ifp);
   2410 	ether_ifattach(ifp, enaddr);
   2411 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2412 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2413 			  RND_FLAG_DEFAULT);
   2414 
   2415 #ifdef WM_EVENT_COUNTERS
   2416 	/* Attach event counters. */
   2417 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2418 	    NULL, xname, "txsstall");
   2419 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2420 	    NULL, xname, "txdstall");
   2421 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2422 	    NULL, xname, "txfifo_stall");
   2423 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2424 	    NULL, xname, "txdw");
   2425 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2426 	    NULL, xname, "txqe");
   2427 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2428 	    NULL, xname, "rxintr");
   2429 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2430 	    NULL, xname, "linkintr");
   2431 
   2432 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2433 	    NULL, xname, "rxipsum");
   2434 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2435 	    NULL, xname, "rxtusum");
   2436 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2437 	    NULL, xname, "txipsum");
   2438 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2439 	    NULL, xname, "txtusum");
   2440 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2441 	    NULL, xname, "txtusum6");
   2442 
   2443 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2444 	    NULL, xname, "txtso");
   2445 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2446 	    NULL, xname, "txtso6");
   2447 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2448 	    NULL, xname, "txtsopain");
   2449 
   2450 	for (i = 0; i < WM_NTXSEGS; i++) {
   2451 		snprintf(wm_txseg_evcnt_names[i],
   2452 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2453 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2454 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2455 	}
   2456 
   2457 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2458 	    NULL, xname, "txdrop");
   2459 
   2460 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2461 	    NULL, xname, "tu");
   2462 
   2463 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2464 	    NULL, xname, "tx_xoff");
   2465 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2466 	    NULL, xname, "tx_xon");
   2467 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2468 	    NULL, xname, "rx_xoff");
   2469 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2470 	    NULL, xname, "rx_xon");
   2471 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2472 	    NULL, xname, "rx_macctl");
   2473 #endif /* WM_EVENT_COUNTERS */
   2474 
   2475 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2476 		pmf_class_network_register(self, ifp);
   2477 	else
   2478 		aprint_error_dev(self, "couldn't establish power handler\n");
   2479 
   2480 	sc->sc_flags |= WM_F_ATTACHED;
   2481 	return;
   2482 
   2483 	/*
   2484 	 * Free any resources we've allocated during the failed attach
   2485 	 * attempt.  Do this in reverse order and fall through.
   2486 	 */
   2487  fail_5:
   2488 	for (i = 0; i < WM_NRXDESC; i++) {
   2489 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2490 			bus_dmamap_destroy(sc->sc_dmat,
   2491 			    sc->sc_rxsoft[i].rxs_dmamap);
   2492 	}
   2493  fail_4:
   2494 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2495 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2496 			bus_dmamap_destroy(sc->sc_dmat,
   2497 			    sc->sc_txsoft[i].txs_dmamap);
   2498 	}
   2499 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2500  fail_3:
   2501 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2502  fail_2:
   2503 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2504 	    sc->sc_cd_size);
   2505  fail_1:
   2506 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2507  fail_0:
   2508 	return;
   2509 }
   2510 
   2511 /* The detach function (ca_detach) */
   2512 static int
   2513 wm_detach(device_t self, int flags __unused)
   2514 {
   2515 	struct wm_softc *sc = device_private(self);
   2516 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2517 	int i;
   2518 #ifndef WM_MPSAFE
   2519 	int s;
   2520 #endif
   2521 
   2522 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2523 		return 0;
   2524 
   2525 #ifndef WM_MPSAFE
   2526 	s = splnet();
   2527 #endif
	/* Stop the interface; its callouts are stopped inside wm_stop(). */
   2529 	wm_stop(ifp, 1);
   2530 
   2531 #ifndef WM_MPSAFE
   2532 	splx(s);
   2533 #endif
   2534 
   2535 	pmf_device_deregister(self);
   2536 
   2537 	/* Tell the firmware about the release */
   2538 	WM_BOTH_LOCK(sc);
   2539 	wm_release_manageability(sc);
   2540 	wm_release_hw_control(sc);
   2541 	WM_BOTH_UNLOCK(sc);
   2542 
   2543 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2544 
   2545 	/* Delete all remaining media. */
   2546 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2547 
   2548 	ether_ifdetach(ifp);
   2549 	if_detach(ifp);
   2552 	/* Unload RX dmamaps and free mbufs */
   2553 	WM_RX_LOCK(sc);
   2554 	wm_rxdrain(sc);
   2555 	WM_RX_UNLOCK(sc);
   2556 	/* Must unlock here */
   2557 
	/* Free the DMA maps; this mirrors the error path at the end of wm_attach() */
   2559 	for (i = 0; i < WM_NRXDESC; i++) {
   2560 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2561 			bus_dmamap_destroy(sc->sc_dmat,
   2562 			    sc->sc_rxsoft[i].rxs_dmamap);
   2563 	}
   2564 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2565 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2566 			bus_dmamap_destroy(sc->sc_dmat,
   2567 			    sc->sc_txsoft[i].txs_dmamap);
   2568 	}
   2569 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2570 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2571 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2572 	    sc->sc_cd_size);
   2573 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2574 
   2575 	/* Disestablish the interrupt handler */
   2576 	if (sc->sc_ih != NULL) {
   2577 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   2578 		sc->sc_ih = NULL;
   2579 	}
   2580 
   2581 	/* Unmap the registers */
   2582 	if (sc->sc_ss) {
   2583 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2584 		sc->sc_ss = 0;
   2585 	}
   2586 
   2587 	if (sc->sc_ios) {
   2588 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2589 		sc->sc_ios = 0;
   2590 	}
   2591 
   2592 	if (sc->sc_tx_lock)
   2593 		mutex_obj_free(sc->sc_tx_lock);
   2594 	if (sc->sc_rx_lock)
   2595 		mutex_obj_free(sc->sc_rx_lock);
   2596 
   2597 	return 0;
   2598 }
   2599 
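/*
 * wm_suspend, wm_resume:
 *
 *	pmf(9) power handlers.  Suspend releases manageability and
 *	hardware control (and, if built with WM_WOL, arms wakeup);
 *	resume re-initializes manageability.
 */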
   2600 static bool
   2601 wm_suspend(device_t self, const pmf_qual_t *qual)
   2602 {
   2603 	struct wm_softc *sc = device_private(self);
   2604 
   2605 	wm_release_manageability(sc);
   2606 	wm_release_hw_control(sc);
   2607 #ifdef WM_WOL
   2608 	wm_enable_wakeup(sc);
   2609 #endif
   2610 
   2611 	return true;
   2612 }
   2613 
   2614 static bool
   2615 wm_resume(device_t self, const pmf_qual_t *qual)
   2616 {
   2617 	struct wm_softc *sc = device_private(self);
   2618 
   2619 	wm_init_manageability(sc);
   2620 
   2621 	return true;
   2622 }
   2623 
   2624 /*
   2625  * wm_watchdog:		[ifnet interface function]
   2626  *
   2627  *	Watchdog timer handler.
   2628  */
   2629 static void
   2630 wm_watchdog(struct ifnet *ifp)
   2631 {
   2632 	struct wm_softc *sc = ifp->if_softc;
   2633 
   2634 	/*
   2635 	 * Since we're using delayed interrupts, sweep up
   2636 	 * before we report an error.
   2637 	 */
   2638 	WM_TX_LOCK(sc);
   2639 	wm_txintr(sc);
   2640 	WM_TX_UNLOCK(sc);
   2641 
   2642 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
   2643 #ifdef WM_DEBUG
   2644 		int i, j;
   2645 		struct wm_txsoft *txs;
   2646 #endif
   2647 		log(LOG_ERR,
   2648 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2649 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
   2650 		    sc->sc_txnext);
   2651 		ifp->if_oerrors++;
   2652 #ifdef WM_DEBUG
   2653 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
   2654 		    i = WM_NEXTTXS(sc, i)) {
   2655 		    txs = &sc->sc_txsoft[i];
   2656 		    printf("txs %d tx %d -> %d\n",
   2657 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2658 		    for (j = txs->txs_firstdesc; ;
   2659 			j = WM_NEXTTX(sc, j)) {
   2660 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2661 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
   2662 			printf("\t %#08x%08x\n",
   2663 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
   2664 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
   2665 			if (j == txs->txs_lastdesc)
   2666 				break;
   2667 			}
   2668 		}
   2669 #endif
   2670 		/* Reset the interface. */
   2671 		(void) wm_init(ifp);
   2672 	}
   2673 
   2674 	/* Try to get more packets going. */
   2675 	ifp->if_start(ifp);
   2676 }
   2677 
   2678 /*
   2679  * wm_tick:
   2680  *
   2681  *	One second timer, used to check link status, sweep up
   2682  *	completed transmit jobs, etc.
   2683  */
   2684 static void
   2685 wm_tick(void *arg)
   2686 {
   2687 	struct wm_softc *sc = arg;
   2688 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2689 #ifndef WM_MPSAFE
   2690 	int s;
   2691 
   2692 	s = splnet();
   2693 #endif
   2694 
   2695 	WM_TX_LOCK(sc);
   2696 
   2697 	if (sc->sc_stopping)
   2698 		goto out;
   2699 
   2700 	if (sc->sc_type >= WM_T_82542_2_1) {
   2701 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2702 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2703 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2704 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2705 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2706 	}
   2707 
   2708 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2709 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2710 	    + CSR_READ(sc, WMREG_CRCERRS)
   2711 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2712 	    + CSR_READ(sc, WMREG_SYMERRC)
   2713 	    + CSR_READ(sc, WMREG_RXERRC)
   2714 	    + CSR_READ(sc, WMREG_SEC)
   2715 	    + CSR_READ(sc, WMREG_CEXTERR)
   2716 	    + CSR_READ(sc, WMREG_RLEC);
   2717 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2718 
   2719 	if (sc->sc_flags & WM_F_HAS_MII)
   2720 		mii_tick(&sc->sc_mii);
   2721 	else if ((sc->sc_type >= WM_T_82575)
   2722 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2723 		wm_serdes_tick(sc);
   2724 	else
   2725 		wm_tbi_tick(sc);
   2726 
   2727 out:
   2728 	WM_TX_UNLOCK(sc);
   2729 #ifndef WM_MPSAFE
   2730 	splx(s);
   2731 #endif
   2732 
   2733 	if (!sc->sc_stopping)
   2734 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2735 }
   2736 
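/*
 * wm_ifflags_cb:
 *
 *	Callback for interface-flag changes.  Promiscuous/allmulti
 *	changes are applied to the receive filter directly; any other
 *	flag change returns ENETRESET so that the caller reinitializes
 *	the interface.
 */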
   2737 static int
   2738 wm_ifflags_cb(struct ethercom *ec)
   2739 {
   2740 	struct ifnet *ifp = &ec->ec_if;
   2741 	struct wm_softc *sc = ifp->if_softc;
   2742 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2743 	int rc = 0;
   2744 
   2745 	WM_BOTH_LOCK(sc);
   2746 
   2747 	if (change != 0)
   2748 		sc->sc_if_flags = ifp->if_flags;
   2749 
   2750 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2751 		rc = ENETRESET;
   2752 		goto out;
   2753 	}
   2754 
   2755 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2756 		wm_set_filter(sc);
   2757 
   2758 	wm_set_vlan(sc);
   2759 
   2760 out:
   2761 	WM_BOTH_UNLOCK(sc);
   2762 
   2763 	return rc;
   2764 }
   2765 
   2766 /*
   2767  * wm_ioctl:		[ifnet interface function]
   2768  *
   2769  *	Handle control requests from the operator.
   2770  */
   2771 static int
   2772 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2773 {
   2774 	struct wm_softc *sc = ifp->if_softc;
   2775 	struct ifreq *ifr = (struct ifreq *) data;
   2776 	struct ifaddr *ifa = (struct ifaddr *)data;
   2777 	struct sockaddr_dl *sdl;
   2778 	int s, error;
   2779 
   2780 #ifndef WM_MPSAFE
   2781 	s = splnet();
   2782 #endif
   2783 	switch (cmd) {
   2784 	case SIOCSIFMEDIA:
   2785 	case SIOCGIFMEDIA:
   2786 		WM_BOTH_LOCK(sc);
   2787 		/* Flow control requires full-duplex mode. */
   2788 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2789 		    (ifr->ifr_media & IFM_FDX) == 0)
   2790 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2791 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2792 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2793 				/* We can do both TXPAUSE and RXPAUSE. */
   2794 				ifr->ifr_media |=
   2795 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2796 			}
   2797 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2798 		}
   2799 		WM_BOTH_UNLOCK(sc);
   2800 #ifdef WM_MPSAFE
   2801 		s = splnet();
   2802 #endif
   2803 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2804 #ifdef WM_MPSAFE
   2805 		splx(s);
   2806 #endif
   2807 		break;
   2808 	case SIOCINITIFADDR:
   2809 		WM_BOTH_LOCK(sc);
   2810 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2811 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2812 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2813 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2814 			/* unicast address is first multicast entry */
   2815 			wm_set_filter(sc);
   2816 			error = 0;
   2817 			WM_BOTH_UNLOCK(sc);
   2818 			break;
   2819 		}
   2820 		WM_BOTH_UNLOCK(sc);
   2821 		/*FALLTHROUGH*/
   2822 	default:
   2823 #ifdef WM_MPSAFE
   2824 		s = splnet();
   2825 #endif
   2826 		/* It may call wm_start, so unlock here */
   2827 		error = ether_ioctl(ifp, cmd, data);
   2828 #ifdef WM_MPSAFE
   2829 		splx(s);
   2830 #endif
   2831 		if (error != ENETRESET)
   2832 			break;
   2833 
   2834 		error = 0;
   2835 
   2836 		if (cmd == SIOCSIFCAP) {
   2837 			error = (*ifp->if_init)(ifp);
   2838 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2839 			;
   2840 		else if (ifp->if_flags & IFF_RUNNING) {
   2841 			/*
   2842 			 * Multicast list has changed; set the hardware filter
   2843 			 * accordingly.
   2844 			 */
   2845 			WM_BOTH_LOCK(sc);
   2846 			wm_set_filter(sc);
   2847 			WM_BOTH_UNLOCK(sc);
   2848 		}
   2849 		break;
   2850 	}
   2851 
   2852 	/* Try to get more packets going. */
   2853 	ifp->if_start(ifp);
   2854 
   2855 #ifndef WM_MPSAFE
   2856 	splx(s);
   2857 #endif
   2858 	return error;
   2859 }
   2860 
   2861 /* MAC address related */
   2862 
   2863 /*
 * Get the offset of the MAC address and return it.
 * If an error occurs, return offset 0.
   2866  */
   2867 static uint16_t
   2868 wm_check_alt_mac_addr(struct wm_softc *sc)
   2869 {
   2870 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2871 	uint16_t offset = NVM_OFF_MACADDR;
   2872 
   2873 	/* Try to read alternative MAC address pointer */
   2874 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2875 		return 0;
   2876 
	/* Check whether the pointer is valid. */
   2878 	if ((offset == 0x0000) || (offset == 0xffff))
   2879 		return 0;
   2880 
   2881 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2882 	/*
	 * Check whether the alternative MAC address is valid.
	 * Some cards have a non-0xffff pointer but don't actually
	 * use an alternative MAC address.
	 *
	 * To tell these apart, check the multicast (group) bit:
	 * a valid unicast address must have it clear.
   2888 	 */
   2889 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2890 		if (((myea[0] & 0xff) & 0x01) == 0)
   2891 			return offset; /* Found */
   2892 
   2893 	/* Not found */
   2894 	return 0;
   2895 }
   2896 
   2897 static int
   2898 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2899 {
   2900 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2901 	uint16_t offset = NVM_OFF_MACADDR;
   2902 	int do_invert = 0;
   2903 
   2904 	switch (sc->sc_type) {
   2905 	case WM_T_82580:
   2906 	case WM_T_I350:
   2907 	case WM_T_I354:
   2908 		/* EEPROM Top Level Partitioning */
   2909 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2910 		break;
   2911 	case WM_T_82571:
   2912 	case WM_T_82575:
   2913 	case WM_T_82576:
   2914 	case WM_T_80003:
   2915 	case WM_T_I210:
   2916 	case WM_T_I211:
   2917 		offset = wm_check_alt_mac_addr(sc);
   2918 		if (offset == 0)
   2919 			if ((sc->sc_funcid & 0x01) == 1)
   2920 				do_invert = 1;
   2921 		break;
   2922 	default:
   2923 		if ((sc->sc_funcid & 0x01) == 1)
   2924 			do_invert = 1;
   2925 		break;
   2926 	}
   2927 
   2928 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2929 		myea) != 0)
   2930 		goto bad;
   2931 
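         	/* Each 16-bit NVM word holds two MAC octets, low byte first. */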
   2932 	enaddr[0] = myea[0] & 0xff;
   2933 	enaddr[1] = myea[0] >> 8;
   2934 	enaddr[2] = myea[1] & 0xff;
   2935 	enaddr[3] = myea[1] >> 8;
   2936 	enaddr[4] = myea[2] & 0xff;
   2937 	enaddr[5] = myea[2] >> 8;
   2938 
   2939 	/*
   2940 	 * Toggle the LSB of the MAC address on the second port
   2941 	 * of some dual port cards.
   2942 	 */
   2943 	if (do_invert != 0)
   2944 		enaddr[5] ^= 1;
   2945 
   2946 	return 0;
   2947 
   2948  bad:
   2949 	return -1;
   2950 }
   2951 
   2952 /*
   2953  * wm_set_ral:
   2954  *
    2955  *	Set an entry in the receive address list.
   2956  */
   2957 static void
   2958 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2959 {
   2960 	uint32_t ral_lo, ral_hi;
   2961 
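         	/*
         	 * Each receive address is a RAL_LO/RAL_HI register pair:
         	 * the first four octets go into RAL_LO and the last two
         	 * into RAL_HI, together with the RAL_AV (address valid) bit.
         	 */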
   2962 	if (enaddr != NULL) {
   2963 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2964 		    (enaddr[3] << 24);
   2965 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2966 		ral_hi |= RAL_AV;
   2967 	} else {
   2968 		ral_lo = 0;
   2969 		ral_hi = 0;
   2970 	}
   2971 
   2972 	if (sc->sc_type >= WM_T_82544) {
   2973 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2974 		    ral_lo);
   2975 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2976 		    ral_hi);
   2977 	} else {
   2978 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2979 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2980 	}
   2981 }
   2982 
   2983 /*
   2984  * wm_mchash:
   2985  *
   2986  *	Compute the hash of the multicast address for the 4096-bit
   2987  *	multicast filter.
   2988  */
   2989 static uint32_t
   2990 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2991 {
   2992 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2993 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2994 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   2995 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   2996 	uint32_t hash;
   2997 
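         	/*
         	 * sc_mchash_type mirrors the RCTL multicast offset (MO)
         	 * field and selects which bits of the last two address
         	 * octets feed the hash.
         	 */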
   2998 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2999 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3000 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   3001 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3002 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3003 		return (hash & 0x3ff);
   3004 	}
   3005 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3006 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3007 
   3008 	return (hash & 0xfff);
   3009 }
   3010 
   3011 /*
   3012  * wm_set_filter:
   3013  *
   3014  *	Set up the receive filter.
   3015  */
   3016 static void
   3017 wm_set_filter(struct wm_softc *sc)
   3018 {
   3019 	struct ethercom *ec = &sc->sc_ethercom;
   3020 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3021 	struct ether_multi *enm;
   3022 	struct ether_multistep step;
   3023 	bus_addr_t mta_reg;
   3024 	uint32_t hash, reg, bit;
   3025 	int i, size;
   3026 
   3027 	if (sc->sc_type >= WM_T_82544)
   3028 		mta_reg = WMREG_CORDOVA_MTA;
   3029 	else
   3030 		mta_reg = WMREG_MTA;
   3031 
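         	/*
         	 * RCTL_BAM accepts broadcast frames; RCTL_UPE and RCTL_MPE
         	 * are the unicast and multicast promiscuous enables.
         	 */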
   3032 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3033 
   3034 	if (ifp->if_flags & IFF_BROADCAST)
   3035 		sc->sc_rctl |= RCTL_BAM;
   3036 	if (ifp->if_flags & IFF_PROMISC) {
   3037 		sc->sc_rctl |= RCTL_UPE;
   3038 		goto allmulti;
   3039 	}
   3040 
   3041 	/*
   3042 	 * Set the station address in the first RAL slot, and
   3043 	 * clear the remaining slots.
   3044 	 */
   3045 	if (sc->sc_type == WM_T_ICH8)
    3046 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3047 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3048 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3049 	    || (sc->sc_type == WM_T_PCH_LPT))
   3050 		size = WM_RAL_TABSIZE_ICH8;
   3051 	else if (sc->sc_type == WM_T_82575)
   3052 		size = WM_RAL_TABSIZE_82575;
   3053 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3054 		size = WM_RAL_TABSIZE_82576;
   3055 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3056 		size = WM_RAL_TABSIZE_I350;
   3057 	else
   3058 		size = WM_RAL_TABSIZE;
   3059 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3060 	for (i = 1; i < size; i++)
   3061 		wm_set_ral(sc, NULL, i);
   3062 
   3063 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3064 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3065 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   3066 		size = WM_ICH8_MC_TABSIZE;
   3067 	else
   3068 		size = WM_MC_TABSIZE;
   3069 	/* Clear out the multicast table. */
   3070 	for (i = 0; i < size; i++)
   3071 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3072 
   3073 	ETHER_FIRST_MULTI(step, ec, enm);
   3074 	while (enm != NULL) {
   3075 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3076 			/*
   3077 			 * We must listen to a range of multicast addresses.
   3078 			 * For now, just accept all multicasts, rather than
   3079 			 * trying to set only those filter bits needed to match
   3080 			 * the range.  (At this time, the only use of address
   3081 			 * ranges is for IP multicast routing, for which the
   3082 			 * range is big enough to require all bits set.)
   3083 			 */
   3084 			goto allmulti;
   3085 		}
   3086 
   3087 		hash = wm_mchash(sc, enm->enm_addrlo);
   3088 
   3089 		reg = (hash >> 5);
   3090 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3091 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3092 		    || (sc->sc_type == WM_T_PCH2)
   3093 		    || (sc->sc_type == WM_T_PCH_LPT))
   3094 			reg &= 0x1f;
   3095 		else
   3096 			reg &= 0x7f;
   3097 		bit = hash & 0x1f;
   3098 
   3099 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3100 		hash |= 1U << bit;
   3101 
    3102 		/* XXX 82544 errata: also rewrite the previous MTA register */
    3103 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3104 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3105 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3106 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3107 		} else
   3108 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3109 
   3110 		ETHER_NEXT_MULTI(step, enm);
   3111 	}
   3112 
   3113 	ifp->if_flags &= ~IFF_ALLMULTI;
   3114 	goto setit;
   3115 
   3116  allmulti:
   3117 	ifp->if_flags |= IFF_ALLMULTI;
   3118 	sc->sc_rctl |= RCTL_MPE;
   3119 
   3120  setit:
   3121 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3122 }
   3123 
   3124 /* Reset and init related */
   3125 
   3126 static void
   3127 wm_set_vlan(struct wm_softc *sc)
   3128 {
   3129 	/* Deal with VLAN enables. */
   3130 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3131 		sc->sc_ctrl |= CTRL_VME;
   3132 	else
   3133 		sc->sc_ctrl &= ~CTRL_VME;
   3134 
   3135 	/* Write the control registers. */
   3136 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3137 }
   3138 
   3139 static void
   3140 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3141 {
   3142 	uint32_t gcr;
   3143 	pcireg_t ctrl2;
   3144 
   3145 	gcr = CSR_READ(sc, WMREG_GCR);
   3146 
    3147 	/* Only take action if the timeout value is at its default of 0 */
   3148 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3149 		goto out;
   3150 
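         	/*
         	 * Devices with a pre-2.0 PCIe capability take the timeout
         	 * from GCR; 2.0 and newer use the Device Control 2 register.
         	 */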
   3151 	if ((gcr & GCR_CAP_VER2) == 0) {
   3152 		gcr |= GCR_CMPL_TMOUT_10MS;
   3153 		goto out;
   3154 	}
   3155 
   3156 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3157 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3158 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3159 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3160 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3161 
   3162 out:
   3163 	/* Disable completion timeout resend */
   3164 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3165 
   3166 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3167 }
   3168 
   3169 void
   3170 wm_get_auto_rd_done(struct wm_softc *sc)
   3171 {
   3172 	int i;
   3173 
   3174 	/* wait for eeprom to reload */
   3175 	switch (sc->sc_type) {
   3176 	case WM_T_82571:
   3177 	case WM_T_82572:
   3178 	case WM_T_82573:
   3179 	case WM_T_82574:
   3180 	case WM_T_82583:
   3181 	case WM_T_82575:
   3182 	case WM_T_82576:
   3183 	case WM_T_82580:
   3184 	case WM_T_I350:
   3185 	case WM_T_I354:
   3186 	case WM_T_I210:
   3187 	case WM_T_I211:
   3188 	case WM_T_80003:
   3189 	case WM_T_ICH8:
   3190 	case WM_T_ICH9:
   3191 		for (i = 0; i < 10; i++) {
   3192 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3193 				break;
   3194 			delay(1000);
   3195 		}
   3196 		if (i == 10) {
   3197 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3198 			    "complete\n", device_xname(sc->sc_dev));
   3199 		}
   3200 		break;
   3201 	default:
   3202 		break;
   3203 	}
   3204 }
   3205 
   3206 void
   3207 wm_lan_init_done(struct wm_softc *sc)
   3208 {
   3209 	uint32_t reg = 0;
   3210 	int i;
   3211 
   3212 	/* wait for eeprom to reload */
   3213 	switch (sc->sc_type) {
   3214 	case WM_T_ICH10:
   3215 	case WM_T_PCH:
   3216 	case WM_T_PCH2:
   3217 	case WM_T_PCH_LPT:
   3218 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3219 			reg = CSR_READ(sc, WMREG_STATUS);
   3220 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3221 				break;
   3222 			delay(100);
   3223 		}
   3224 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3225 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3226 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3227 		}
   3228 		break;
   3229 	default:
   3230 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3231 		    __func__);
   3232 		break;
   3233 	}
   3234 
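         	/* Acknowledge completion by clearing the LAN_INIT_DONE bit. */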
   3235 	reg &= ~STATUS_LAN_INIT_DONE;
   3236 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3237 }
   3238 
   3239 void
   3240 wm_get_cfg_done(struct wm_softc *sc)
   3241 {
   3242 	int mask;
   3243 	uint32_t reg;
   3244 	int i;
   3245 
   3246 	/* wait for eeprom to reload */
   3247 	switch (sc->sc_type) {
   3248 	case WM_T_82542_2_0:
   3249 	case WM_T_82542_2_1:
   3250 		/* null */
   3251 		break;
   3252 	case WM_T_82543:
   3253 	case WM_T_82544:
   3254 	case WM_T_82540:
   3255 	case WM_T_82545:
   3256 	case WM_T_82545_3:
   3257 	case WM_T_82546:
   3258 	case WM_T_82546_3:
   3259 	case WM_T_82541:
   3260 	case WM_T_82541_2:
   3261 	case WM_T_82547:
   3262 	case WM_T_82547_2:
   3263 	case WM_T_82573:
   3264 	case WM_T_82574:
   3265 	case WM_T_82583:
   3266 		/* generic */
   3267 		delay(10*1000);
   3268 		break;
   3269 	case WM_T_80003:
   3270 	case WM_T_82571:
   3271 	case WM_T_82572:
   3272 	case WM_T_82575:
   3273 	case WM_T_82576:
   3274 	case WM_T_82580:
   3275 	case WM_T_I350:
   3276 	case WM_T_I354:
   3277 	case WM_T_I210:
   3278 	case WM_T_I211:
   3279 		if (sc->sc_type == WM_T_82571) {
    3280 			/* On 82571, both ports use the function 0 bit */
   3281 			mask = EEMNGCTL_CFGDONE_0;
   3282 		} else
   3283 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3284 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3285 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3286 				break;
   3287 			delay(1000);
   3288 		}
   3289 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3290 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3291 				device_xname(sc->sc_dev), __func__));
   3292 		}
   3293 		break;
   3294 	case WM_T_ICH8:
   3295 	case WM_T_ICH9:
   3296 	case WM_T_ICH10:
   3297 	case WM_T_PCH:
   3298 	case WM_T_PCH2:
   3299 	case WM_T_PCH_LPT:
   3300 		delay(10*1000);
   3301 		if (sc->sc_type >= WM_T_ICH10)
   3302 			wm_lan_init_done(sc);
   3303 		else
   3304 			wm_get_auto_rd_done(sc);
   3305 
   3306 		reg = CSR_READ(sc, WMREG_STATUS);
   3307 		if ((reg & STATUS_PHYRA) != 0)
   3308 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3309 		break;
   3310 	default:
   3311 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3312 		    __func__);
   3313 		break;
   3314 	}
   3315 }
   3316 
   3317 /* Init hardware bits */
   3318 void
   3319 wm_initialize_hardware_bits(struct wm_softc *sc)
   3320 {
   3321 	uint32_t tarc0, tarc1, reg;
   3322 
   3323 	/* For 82571 variant, 80003 and ICHs */
   3324 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3325 	    || (sc->sc_type >= WM_T_80003)) {
   3326 
   3327 		/* Transmit Descriptor Control 0 */
   3328 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3329 		reg |= TXDCTL_COUNT_DESC;
   3330 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3331 
   3332 		/* Transmit Descriptor Control 1 */
   3333 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3334 		reg |= TXDCTL_COUNT_DESC;
   3335 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3336 
   3337 		/* TARC0 */
   3338 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3339 		switch (sc->sc_type) {
   3340 		case WM_T_82571:
   3341 		case WM_T_82572:
   3342 		case WM_T_82573:
   3343 		case WM_T_82574:
   3344 		case WM_T_82583:
   3345 		case WM_T_80003:
   3346 			/* Clear bits 30..27 */
   3347 			tarc0 &= ~__BITS(30, 27);
   3348 			break;
   3349 		default:
   3350 			break;
   3351 		}
   3352 
   3353 		switch (sc->sc_type) {
   3354 		case WM_T_82571:
   3355 		case WM_T_82572:
   3356 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3357 
   3358 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3359 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3360 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3361 			/* 8257[12] Errata No.7 */
    3362 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3363 
   3364 			/* TARC1 bit 28 */
   3365 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3366 				tarc1 &= ~__BIT(28);
   3367 			else
   3368 				tarc1 |= __BIT(28);
   3369 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3370 
   3371 			/*
   3372 			 * 8257[12] Errata No.13
    3373 			 * Disable Dynamic Clock Gating.
   3374 			 */
   3375 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3376 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3377 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3378 			break;
   3379 		case WM_T_82573:
   3380 		case WM_T_82574:
   3381 		case WM_T_82583:
   3382 			if ((sc->sc_type == WM_T_82574)
   3383 			    || (sc->sc_type == WM_T_82583))
   3384 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3385 
   3386 			/* Extended Device Control */
   3387 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3388 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3389 			reg |= __BIT(22);	/* Set bit 22 */
   3390 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3391 
   3392 			/* Device Control */
   3393 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3394 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3395 
   3396 			/* PCIe Control Register */
   3397 			if ((sc->sc_type == WM_T_82574)
   3398 			    || (sc->sc_type == WM_T_82583)) {
   3399 				/*
   3400 				 * Document says this bit must be set for
   3401 				 * proper operation.
   3402 				 */
   3403 				reg = CSR_READ(sc, WMREG_GCR);
   3404 				reg |= __BIT(22);
   3405 				CSR_WRITE(sc, WMREG_GCR, reg);
   3406 
   3407 				/*
    3408 				 * Apply a workaround for the hardware errata
    3409 				 * documented in the errata docs.  It fixes an
    3410 				 * issue where unreliable PCIe completions may
    3411 				 * occur, particularly with ASPM enabled.
    3412 				 * Without the fix, the issue can cause Tx
    3413 				 * timeouts.
   3414 				 */
   3415 				reg = CSR_READ(sc, WMREG_GCR2);
   3416 				reg |= __BIT(0);
   3417 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3418 			}
   3419 			break;
   3420 		case WM_T_80003:
   3421 			/* TARC0 */
   3422 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3423 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3424 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3425 
   3426 			/* TARC1 bit 28 */
   3427 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3428 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3429 				tarc1 &= ~__BIT(28);
   3430 			else
   3431 				tarc1 |= __BIT(28);
   3432 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3433 			break;
   3434 		case WM_T_ICH8:
   3435 		case WM_T_ICH9:
   3436 		case WM_T_ICH10:
   3437 		case WM_T_PCH:
   3438 		case WM_T_PCH2:
   3439 		case WM_T_PCH_LPT:
   3440 			/* TARC 0 */
   3441 			if (sc->sc_type == WM_T_ICH8) {
   3442 				/* Set TARC0 bits 29 and 28 */
   3443 				tarc0 |= __BITS(29, 28);
   3444 			}
   3445 			/* Set TARC0 bits 23,24,26,27 */
   3446 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3447 
   3448 			/* CTRL_EXT */
   3449 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3450 			reg |= __BIT(22);	/* Set bit 22 */
   3451 			/*
   3452 			 * Enable PHY low-power state when MAC is at D3
   3453 			 * w/o WoL
   3454 			 */
   3455 			if (sc->sc_type >= WM_T_PCH)
   3456 				reg |= CTRL_EXT_PHYPDEN;
   3457 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3458 
   3459 			/* TARC1 */
   3460 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3461 			/* bit 28 */
   3462 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3463 				tarc1 &= ~__BIT(28);
   3464 			else
   3465 				tarc1 |= __BIT(28);
   3466 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3467 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3468 
   3469 			/* Device Status */
   3470 			if (sc->sc_type == WM_T_ICH8) {
   3471 				reg = CSR_READ(sc, WMREG_STATUS);
   3472 				reg &= ~__BIT(31);
   3473 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3474 
   3475 			}
   3476 
   3477 			/*
   3478 			 * Work-around descriptor data corruption issue during
   3479 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3480 			 * capability.
   3481 			 */
   3482 			reg = CSR_READ(sc, WMREG_RFCTL);
   3483 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3484 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3485 			break;
   3486 		default:
   3487 			break;
   3488 		}
   3489 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3490 
   3491 		/*
   3492 		 * 8257[12] Errata No.52 and some others.
   3493 		 * Avoid RSS Hash Value bug.
   3494 		 */
   3495 		switch (sc->sc_type) {
   3496 		case WM_T_82571:
   3497 		case WM_T_82572:
   3498 		case WM_T_82573:
   3499 		case WM_T_80003:
   3500 		case WM_T_ICH8:
   3501 			reg = CSR_READ(sc, WMREG_RFCTL);
    3502 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3503 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3504 			break;
   3505 		default:
   3506 			break;
   3507 		}
   3508 	}
   3509 }
   3510 
   3511 static uint32_t
   3512 wm_rxpbs_adjust_82580(uint32_t val)
   3513 {
   3514 	uint32_t rv = 0;
   3515 
   3516 	if (val < __arraycount(wm_82580_rxpbs_table))
   3517 		rv = wm_82580_rxpbs_table[val];
   3518 
   3519 	return rv;
   3520 }
   3521 
   3522 /*
   3523  * wm_reset:
   3524  *
   3525  *	Reset the i82542 chip.
   3526  */
   3527 static void
   3528 wm_reset(struct wm_softc *sc)
   3529 {
   3530 	int phy_reset = 0;
   3531 	int error = 0;
   3532 	uint32_t reg, mask;
   3533 
   3534 	/*
   3535 	 * Allocate on-chip memory according to the MTU size.
   3536 	 * The Packet Buffer Allocation register must be written
   3537 	 * before the chip is reset.
   3538 	 */
   3539 	switch (sc->sc_type) {
   3540 	case WM_T_82547:
   3541 	case WM_T_82547_2:
   3542 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3543 		    PBA_22K : PBA_30K;
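         		/*
         		 * The Tx FIFO occupies whatever remains of the 40KB
         		 * packet buffer above the Rx allocation.
         		 */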
   3544 		sc->sc_txfifo_head = 0;
   3545 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3546 		sc->sc_txfifo_size =
   3547 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3548 		sc->sc_txfifo_stall = 0;
   3549 		break;
   3550 	case WM_T_82571:
   3551 	case WM_T_82572:
    3552 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3553 	case WM_T_80003:
   3554 		sc->sc_pba = PBA_32K;
   3555 		break;
   3556 	case WM_T_82573:
   3557 		sc->sc_pba = PBA_12K;
   3558 		break;
   3559 	case WM_T_82574:
   3560 	case WM_T_82583:
   3561 		sc->sc_pba = PBA_20K;
   3562 		break;
   3563 	case WM_T_82576:
   3564 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3565 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3566 		break;
   3567 	case WM_T_82580:
   3568 	case WM_T_I350:
   3569 	case WM_T_I354:
   3570 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3571 		break;
   3572 	case WM_T_I210:
   3573 	case WM_T_I211:
   3574 		sc->sc_pba = PBA_34K;
   3575 		break;
   3576 	case WM_T_ICH8:
   3577 		/* Workaround for a bit corruption issue in FIFO memory */
   3578 		sc->sc_pba = PBA_8K;
   3579 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3580 		break;
   3581 	case WM_T_ICH9:
   3582 	case WM_T_ICH10:
   3583 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3584 		    PBA_14K : PBA_10K;
   3585 		break;
   3586 	case WM_T_PCH:
   3587 	case WM_T_PCH2:
   3588 	case WM_T_PCH_LPT:
   3589 		sc->sc_pba = PBA_26K;
   3590 		break;
   3591 	default:
   3592 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3593 		    PBA_40K : PBA_48K;
   3594 		break;
   3595 	}
   3596 	/*
    3597 	 * Only old or non-multiqueue devices have the PBA register.
   3598 	 * XXX Need special handling for 82575.
   3599 	 */
   3600 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3601 	    || (sc->sc_type == WM_T_82575))
   3602 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3603 
   3604 	/* Prevent the PCI-E bus from sticking */
   3605 	if (sc->sc_flags & WM_F_PCIE) {
   3606 		int timeout = 800;
   3607 
   3608 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3609 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3610 
   3611 		while (timeout--) {
   3612 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3613 			    == 0)
   3614 				break;
   3615 			delay(100);
   3616 		}
   3617 	}
   3618 
   3619 	/* Set the completion timeout for interface */
   3620 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3621 	    || (sc->sc_type == WM_T_82580)
   3622 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3623 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3624 		wm_set_pcie_completion_timeout(sc);
   3625 
   3626 	/* Clear interrupt */
   3627 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3628 
   3629 	/* Stop the transmit and receive processes. */
   3630 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3631 	sc->sc_rctl &= ~RCTL_EN;
   3632 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3633 	CSR_WRITE_FLUSH(sc);
   3634 
   3635 	/* XXX set_tbi_sbp_82543() */
   3636 
   3637 	delay(10*1000);
   3638 
   3639 	/* Must acquire the MDIO ownership before MAC reset */
   3640 	switch (sc->sc_type) {
   3641 	case WM_T_82573:
   3642 	case WM_T_82574:
   3643 	case WM_T_82583:
   3644 		error = wm_get_hw_semaphore_82573(sc);
   3645 		break;
   3646 	default:
   3647 		break;
   3648 	}
   3649 
   3650 	/*
   3651 	 * 82541 Errata 29? & 82547 Errata 28?
   3652 	 * See also the description about PHY_RST bit in CTRL register
   3653 	 * in 8254x_GBe_SDM.pdf.
   3654 	 */
   3655 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3656 		CSR_WRITE(sc, WMREG_CTRL,
   3657 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3658 		CSR_WRITE_FLUSH(sc);
   3659 		delay(5000);
   3660 	}
   3661 
   3662 	switch (sc->sc_type) {
   3663 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3664 	case WM_T_82541:
   3665 	case WM_T_82541_2:
   3666 	case WM_T_82547:
   3667 	case WM_T_82547_2:
   3668 		/*
   3669 		 * On some chipsets, a reset through a memory-mapped write
   3670 		 * cycle can cause the chip to reset before completing the
    3671 		 * write cycle.  This causes a major headache that can be
   3672 		 * avoided by issuing the reset via indirect register writes
   3673 		 * through I/O space.
   3674 		 *
   3675 		 * So, if we successfully mapped the I/O BAR at attach time,
   3676 		 * use that.  Otherwise, try our luck with a memory-mapped
   3677 		 * reset.
   3678 		 */
   3679 		if (sc->sc_flags & WM_F_IOH_VALID)
   3680 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3681 		else
   3682 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3683 		break;
   3684 	case WM_T_82545_3:
   3685 	case WM_T_82546_3:
   3686 		/* Use the shadow control register on these chips. */
   3687 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3688 		break;
   3689 	case WM_T_80003:
   3690 		mask = swfwphysem[sc->sc_funcid];
   3691 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3692 		wm_get_swfw_semaphore(sc, mask);
   3693 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3694 		wm_put_swfw_semaphore(sc, mask);
   3695 		break;
   3696 	case WM_T_ICH8:
   3697 	case WM_T_ICH9:
   3698 	case WM_T_ICH10:
   3699 	case WM_T_PCH:
   3700 	case WM_T_PCH2:
   3701 	case WM_T_PCH_LPT:
   3702 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3703 		if (wm_check_reset_block(sc) == 0) {
   3704 			/*
   3705 			 * Gate automatic PHY configuration by hardware on
   3706 			 * non-managed 82579
   3707 			 */
   3708 			if ((sc->sc_type == WM_T_PCH2)
   3709 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3710 				!= 0))
   3711 				wm_gate_hw_phy_config_ich8lan(sc, 1);
   3712 
   3713 
   3714 			reg |= CTRL_PHY_RESET;
   3715 			phy_reset = 1;
   3716 		}
   3717 		wm_get_swfwhw_semaphore(sc);
   3718 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3719 		/* Don't insert a completion barrier during reset */
   3720 		delay(20*1000);
   3721 		wm_put_swfwhw_semaphore(sc);
   3722 		break;
   3723 	case WM_T_82580:
   3724 	case WM_T_I350:
   3725 	case WM_T_I354:
   3726 	case WM_T_I210:
   3727 	case WM_T_I211:
   3728 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3729 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3730 			CSR_WRITE_FLUSH(sc);
   3731 		delay(5000);
   3732 		break;
   3733 	case WM_T_82542_2_0:
   3734 	case WM_T_82542_2_1:
   3735 	case WM_T_82543:
   3736 	case WM_T_82540:
   3737 	case WM_T_82545:
   3738 	case WM_T_82546:
   3739 	case WM_T_82571:
   3740 	case WM_T_82572:
   3741 	case WM_T_82573:
   3742 	case WM_T_82574:
   3743 	case WM_T_82575:
   3744 	case WM_T_82576:
   3745 	case WM_T_82583:
   3746 	default:
   3747 		/* Everything else can safely use the documented method. */
   3748 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3749 		break;
   3750 	}
   3751 
   3752 	/* Must release the MDIO ownership after MAC reset */
   3753 	switch (sc->sc_type) {
   3754 	case WM_T_82573:
   3755 	case WM_T_82574:
   3756 	case WM_T_82583:
   3757 		if (error == 0)
   3758 			wm_put_hw_semaphore_82573(sc);
   3759 		break;
   3760 	default:
   3761 		break;
   3762 	}
   3763 
   3764 	if (phy_reset != 0)
   3765 		wm_get_cfg_done(sc);
   3766 
   3767 	/* reload EEPROM */
   3768 	switch (sc->sc_type) {
   3769 	case WM_T_82542_2_0:
   3770 	case WM_T_82542_2_1:
   3771 	case WM_T_82543:
   3772 	case WM_T_82544:
   3773 		delay(10);
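         		/* CTRL_EXT_EE_RST forces the chip to re-read the EEPROM */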
   3774 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3775 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3776 		CSR_WRITE_FLUSH(sc);
   3777 		delay(2000);
   3778 		break;
   3779 	case WM_T_82540:
   3780 	case WM_T_82545:
   3781 	case WM_T_82545_3:
   3782 	case WM_T_82546:
   3783 	case WM_T_82546_3:
   3784 		delay(5*1000);
   3785 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3786 		break;
   3787 	case WM_T_82541:
   3788 	case WM_T_82541_2:
   3789 	case WM_T_82547:
   3790 	case WM_T_82547_2:
   3791 		delay(20000);
   3792 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3793 		break;
   3794 	case WM_T_82571:
   3795 	case WM_T_82572:
   3796 	case WM_T_82573:
   3797 	case WM_T_82574:
   3798 	case WM_T_82583:
   3799 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3800 			delay(10);
   3801 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3802 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3803 			CSR_WRITE_FLUSH(sc);
   3804 		}
   3805 		/* check EECD_EE_AUTORD */
   3806 		wm_get_auto_rd_done(sc);
   3807 		/*
    3808 		 * PHY configuration from the NVM starts only after
    3809 		 * EECD_AUTO_RD is set.
   3810 		 */
   3811 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3812 		    || (sc->sc_type == WM_T_82583))
   3813 			delay(25*1000);
   3814 		break;
   3815 	case WM_T_82575:
   3816 	case WM_T_82576:
   3817 	case WM_T_82580:
   3818 	case WM_T_I350:
   3819 	case WM_T_I354:
   3820 	case WM_T_I210:
   3821 	case WM_T_I211:
   3822 	case WM_T_80003:
   3823 		/* check EECD_EE_AUTORD */
   3824 		wm_get_auto_rd_done(sc);
   3825 		break;
   3826 	case WM_T_ICH8:
   3827 	case WM_T_ICH9:
   3828 	case WM_T_ICH10:
   3829 	case WM_T_PCH:
   3830 	case WM_T_PCH2:
   3831 	case WM_T_PCH_LPT:
   3832 		break;
   3833 	default:
   3834 		panic("%s: unknown type\n", __func__);
   3835 	}
   3836 
   3837 	/* Check whether EEPROM is present or not */
   3838 	switch (sc->sc_type) {
   3839 	case WM_T_82575:
   3840 	case WM_T_82576:
   3841 	case WM_T_82580:
   3842 	case WM_T_I350:
   3843 	case WM_T_I354:
   3844 	case WM_T_ICH8:
   3845 	case WM_T_ICH9:
   3846 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3847 			/* Not found */
   3848 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3849 			if (sc->sc_type == WM_T_82575)
   3850 				wm_reset_init_script_82575(sc);
   3851 		}
   3852 		break;
   3853 	default:
   3854 		break;
   3855 	}
   3856 
   3857 	if ((sc->sc_type == WM_T_82580)
   3858 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3859 		/* clear global device reset status bit */
   3860 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3861 	}
   3862 
   3863 	/* Clear any pending interrupt events. */
   3864 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3865 	reg = CSR_READ(sc, WMREG_ICR);
   3866 
   3867 	/* reload sc_ctrl */
   3868 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3869 
   3870 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   3871 		wm_set_eee_i350(sc);
   3872 
   3873 	/* dummy read from WUC */
   3874 	if (sc->sc_type == WM_T_PCH)
   3875 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3876 	/*
   3877 	 * For PCH, this write will make sure that any noise will be detected
   3878 	 * as a CRC error and be dropped rather than show up as a bad packet
    3879 	 * to the DMA engine.
   3880 	 */
   3881 	if (sc->sc_type == WM_T_PCH)
   3882 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3883 
   3884 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3885 		CSR_WRITE(sc, WMREG_WUC, 0);
   3886 
   3887 	wm_reset_mdicnfg_82580(sc);
   3888 
   3889 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   3890 		wm_pll_workaround_i210(sc);
   3891 }
   3892 
   3893 /*
   3894  * wm_add_rxbuf:
   3895  *
    3896  *	Add a receive buffer to the indicated descriptor.
   3897  */
   3898 static int
   3899 wm_add_rxbuf(struct wm_softc *sc, int idx)
   3900 {
   3901 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
   3902 	struct mbuf *m;
   3903 	int error;
   3904 
   3905 	KASSERT(WM_RX_LOCKED(sc));
   3906 
   3907 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3908 	if (m == NULL)
   3909 		return ENOBUFS;
   3910 
   3911 	MCLGET(m, M_DONTWAIT);
   3912 	if ((m->m_flags & M_EXT) == 0) {
   3913 		m_freem(m);
   3914 		return ENOBUFS;
   3915 	}
   3916 
   3917 	if (rxs->rxs_mbuf != NULL)
   3918 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3919 
   3920 	rxs->rxs_mbuf = m;
   3921 
   3922 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3923 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   3924 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   3925 	if (error) {
   3926 		/* XXX XXX XXX */
   3927 		aprint_error_dev(sc->sc_dev,
   3928 		    "unable to load rx DMA map %d, error = %d\n",
   3929 		    idx, error);
   3930 		panic("wm_add_rxbuf");
   3931 	}
   3932 
   3933 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   3934 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   3935 
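         	/*
         	 * On NEWQUEUE (82575 and later) chips the descriptor may only
         	 * be initialized once the receiver is enabled; see the
         	 * matching logic in wm_init_locked().
         	 */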
   3936 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3937 		if ((sc->sc_rctl & RCTL_EN) != 0)
   3938 			WM_INIT_RXDESC(sc, idx);
   3939 	} else
   3940 		WM_INIT_RXDESC(sc, idx);
   3941 
   3942 	return 0;
   3943 }
   3944 
   3945 /*
   3946  * wm_rxdrain:
   3947  *
   3948  *	Drain the receive queue.
   3949  */
   3950 static void
   3951 wm_rxdrain(struct wm_softc *sc)
   3952 {
   3953 	struct wm_rxsoft *rxs;
   3954 	int i;
   3955 
   3956 	KASSERT(WM_RX_LOCKED(sc));
   3957 
   3958 	for (i = 0; i < WM_NRXDESC; i++) {
   3959 		rxs = &sc->sc_rxsoft[i];
   3960 		if (rxs->rxs_mbuf != NULL) {
   3961 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3962 			m_freem(rxs->rxs_mbuf);
   3963 			rxs->rxs_mbuf = NULL;
   3964 		}
   3965 	}
   3966 }
   3967 
   3968 /*
   3969  * wm_init:		[ifnet interface function]
   3970  *
   3971  *	Initialize the interface.
   3972  */
   3973 static int
   3974 wm_init(struct ifnet *ifp)
   3975 {
   3976 	struct wm_softc *sc = ifp->if_softc;
   3977 	int ret;
   3978 
   3979 	WM_BOTH_LOCK(sc);
   3980 	ret = wm_init_locked(ifp);
   3981 	WM_BOTH_UNLOCK(sc);
   3982 
   3983 	return ret;
   3984 }
   3985 
   3986 static int
   3987 wm_init_locked(struct ifnet *ifp)
   3988 {
   3989 	struct wm_softc *sc = ifp->if_softc;
   3990 	struct wm_rxsoft *rxs;
   3991 	int i, j, trynum, error = 0;
   3992 	uint32_t reg;
   3993 
   3994 	KASSERT(WM_BOTH_LOCKED(sc));
   3995 	/*
    3996 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    3997 	 * There is a small but measurable benefit to avoiding the adjustment
   3998 	 * of the descriptor so that the headers are aligned, for normal mtu,
   3999 	 * on such platforms.  One possibility is that the DMA itself is
   4000 	 * slightly more efficient if the front of the entire packet (instead
   4001 	 * of the front of the headers) is aligned.
   4002 	 *
   4003 	 * Note we must always set align_tweak to 0 if we are using
   4004 	 * jumbo frames.
   4005 	 */
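         	/* A 2-byte tweak puts the IP header on a 32-bit boundary. */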
   4006 #ifdef __NO_STRICT_ALIGNMENT
   4007 	sc->sc_align_tweak = 0;
   4008 #else
   4009 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4010 		sc->sc_align_tweak = 0;
   4011 	else
   4012 		sc->sc_align_tweak = 2;
   4013 #endif /* __NO_STRICT_ALIGNMENT */
   4014 
   4015 	/* Cancel any pending I/O. */
   4016 	wm_stop_locked(ifp, 0);
   4017 
   4018 	/* update statistics before reset */
   4019 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4020 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4021 
   4022 	/* Reset the chip to a known state. */
   4023 	wm_reset(sc);
   4024 
   4025 	switch (sc->sc_type) {
   4026 	case WM_T_82571:
   4027 	case WM_T_82572:
   4028 	case WM_T_82573:
   4029 	case WM_T_82574:
   4030 	case WM_T_82583:
   4031 	case WM_T_80003:
   4032 	case WM_T_ICH8:
   4033 	case WM_T_ICH9:
   4034 	case WM_T_ICH10:
   4035 	case WM_T_PCH:
   4036 	case WM_T_PCH2:
   4037 	case WM_T_PCH_LPT:
   4038 		if (wm_check_mng_mode(sc) != 0)
   4039 			wm_get_hw_control(sc);
   4040 		break;
   4041 	default:
   4042 		break;
   4043 	}
   4044 
   4045 	/* Init hardware bits */
   4046 	wm_initialize_hardware_bits(sc);
   4047 
   4048 	/* Reset the PHY. */
   4049 	if (sc->sc_flags & WM_F_HAS_MII)
   4050 		wm_gmii_reset(sc);
   4051 
   4052 	/* Calculate (E)ITR value */
   4053 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4054 		sc->sc_itr = 450;	/* For EITR */
   4055 	} else if (sc->sc_type >= WM_T_82543) {
   4056 		/*
   4057 		 * Set up the interrupt throttling register (units of 256ns)
   4058 		 * Note that a footnote in Intel's documentation says this
   4059 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4060 		 * or 10Mbit mode.  Empirically, it appears to be the case
    4061 		 * or 10Mbit mode.  Empirically, the same appears to hold
    4062 		 * for the 1024ns units of the other
   4063 		 * to divide this value by 4 when the link speed is low.
   4064 		 *
   4065 		 * XXX implement this division at link speed change!
   4066 		 */
   4067 
   4068 		/*
   4069 		 * For N interrupts/sec, set this value to:
   4070 		 * 1000000000 / (N * 256).  Note that we set the
   4071 		 * absolute and packet timer values to this value
   4072 		 * divided by 4 to get "simple timer" behavior.
   4073 		 */
   4074 
   4075 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4076 	}
   4077 
   4078 	/* Initialize the transmit descriptor ring. */
   4079 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
   4080 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
   4081 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   4082 	sc->sc_txfree = WM_NTXDESC(sc);
   4083 	sc->sc_txnext = 0;
   4084 
   4085 	if (sc->sc_type < WM_T_82543) {
   4086 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
   4087 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
   4088 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
   4089 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   4090 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   4091 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   4092 	} else {
   4093 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
   4094 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
   4095 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
   4096 		CSR_WRITE(sc, WMREG_TDH, 0);
   4097 
   4098 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4099 			/*
   4100 			 * Don't write TDT before TCTL.EN is set.
   4101 			 * See the document.
   4102 			 */
   4103 			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
   4104 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   4105 			    | TXDCTL_WTHRESH(0));
   4106 		else {
   4107 			/* ITR / 4 */
   4108 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   4109 			if (sc->sc_type >= WM_T_82540) {
   4110 				/* should be same */
   4111 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   4112 			}
   4113 
   4114 			CSR_WRITE(sc, WMREG_TDT, 0);
   4115 			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
   4116 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   4117 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
   4118 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   4119 		}
   4120 	}
   4121 
   4122 	/* Initialize the transmit job descriptors. */
   4123 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
   4124 		sc->sc_txsoft[i].txs_mbuf = NULL;
   4125 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
   4126 	sc->sc_txsnext = 0;
   4127 	sc->sc_txsdirty = 0;
   4128 
   4129 	/*
   4130 	 * Initialize the receive descriptor and receive job
   4131 	 * descriptor rings.
   4132 	 */
   4133 	if (sc->sc_type < WM_T_82543) {
   4134 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
   4135 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
   4136 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
   4137 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   4138 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   4139 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   4140 
   4141 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   4142 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   4143 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   4144 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   4145 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   4146 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   4147 	} else {
   4148 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
   4149 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
   4150 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
   4151 
   4152 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4153 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    4154 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   4155 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
   4156 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   4157 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
   4158 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   4159 			    | RXDCTL_WTHRESH(1));
   4160 		} else {
   4161 			CSR_WRITE(sc, WMREG_RDH, 0);
   4162 			CSR_WRITE(sc, WMREG_RDT, 0);
   4163 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
   4164 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
   4165 		}
   4166 	}
   4167 	for (i = 0; i < WM_NRXDESC; i++) {
   4168 		rxs = &sc->sc_rxsoft[i];
   4169 		if (rxs->rxs_mbuf == NULL) {
   4170 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
   4171 				log(LOG_ERR, "%s: unable to allocate or map "
   4172 				    "rx buffer %d, error = %d\n",
   4173 				    device_xname(sc->sc_dev), i, error);
   4174 				/*
   4175 				 * XXX Should attempt to run with fewer receive
   4176 				 * XXX buffers instead of just failing.
   4177 				 */
   4178 				wm_rxdrain(sc);
   4179 				goto out;
   4180 			}
   4181 		} else {
   4182 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4183 				WM_INIT_RXDESC(sc, i);
   4184 			/*
    4185 			 * For 82575 and newer devices, the RX descriptors
   4186 			 * must be initialized after the setting of RCTL.EN in
   4187 			 * wm_set_filter()
   4188 			 */
   4189 		}
   4190 	}
   4191 	sc->sc_rxptr = 0;
   4192 	sc->sc_rxdiscard = 0;
   4193 	WM_RXCHAIN_RESET(sc);
   4194 
   4195 	/*
   4196 	 * Clear out the VLAN table -- we don't use it (yet).
   4197 	 */
   4198 	CSR_WRITE(sc, WMREG_VET, 0);
   4199 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4200 		trynum = 10; /* Due to hw errata */
   4201 	else
   4202 		trynum = 1;
   4203 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4204 		for (j = 0; j < trynum; j++)
   4205 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4206 
   4207 	/*
   4208 	 * Set up flow-control parameters.
   4209 	 *
   4210 	 * XXX Values could probably stand some tuning.
   4211 	 */
   4212 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4213 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4214 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   4215 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4216 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4217 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4218 	}
   4219 
   4220 	sc->sc_fcrtl = FCRTL_DFLT;
   4221 	if (sc->sc_type < WM_T_82543) {
   4222 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4223 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4224 	} else {
   4225 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4226 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4227 	}
   4228 
   4229 	if (sc->sc_type == WM_T_80003)
   4230 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4231 	else
   4232 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4233 
   4234 	/* Writes the control register. */
   4235 	wm_set_vlan(sc);
   4236 
   4237 	if (sc->sc_flags & WM_F_HAS_MII) {
   4238 		int val;
   4239 
   4240 		switch (sc->sc_type) {
   4241 		case WM_T_80003:
   4242 		case WM_T_ICH8:
   4243 		case WM_T_ICH9:
   4244 		case WM_T_ICH10:
   4245 		case WM_T_PCH:
   4246 		case WM_T_PCH2:
   4247 		case WM_T_PCH_LPT:
   4248 			/*
   4249 			 * Set the mac to wait the maximum time between each
   4250 			 * iteration and increase the max iterations when
   4251 			 * polling the phy; this fixes erroneous timeouts at
   4252 			 * 10Mbps.
   4253 			 */
   4254 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4255 			    0xFFFF);
   4256 			val = wm_kmrn_readreg(sc,
   4257 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   4258 			val |= 0x3F;
   4259 			wm_kmrn_writereg(sc,
   4260 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4261 			break;
   4262 		default:
   4263 			break;
   4264 		}
   4265 
   4266 		if (sc->sc_type == WM_T_80003) {
   4267 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4268 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4269 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4270 
   4271 			/* Bypass RX and TX FIFO's */
   4272 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4273 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4274 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4275 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4276 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4277 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4278 		}
   4279 	}
   4280 #if 0
   4281 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4282 #endif
   4283 
   4284 	/* Set up checksum offload parameters. */
   4285 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4286 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4287 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4288 		reg |= RXCSUM_IPOFL;
   4289 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4290 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4291 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4292 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4293 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4294 
   4295 	/* Set up the interrupt registers. */
   4296 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4297 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4298 	    ICR_RXO | ICR_RXT0;
   4299 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4300 
   4301 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4302 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4303 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   4304 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4305 		reg |= KABGTXD_BGSQLBIAS;
   4306 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4307 	}
   4308 
   4309 	/* Set up the inter-packet gap. */
   4310 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4311 
   4312 	if (sc->sc_type >= WM_T_82543) {
   4313 		/*
   4314 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
   4315 		 * the multi queue function with MSI-X.
   4316 		 */
   4317 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4318 			CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
   4319 		else
   4320 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4321 	}
   4322 
   4323 	/* Set the VLAN ethernetype. */
   4324 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4325 
   4326 	/*
   4327 	 * Set up the transmit control register; we start out with
    4328 	 * a collision distance suitable for FDX, but update it when
   4329 	 * we resolve the media type.
   4330 	 */
   4331 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4332 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4333 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4334 	if (sc->sc_type >= WM_T_82571)
   4335 		sc->sc_tctl |= TCTL_MULR;
   4336 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4337 
   4338 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4339 		/* Write TDT after TCTL.EN is set. See the document. */
   4340 		CSR_WRITE(sc, WMREG_TDT, 0);
   4341 	}
   4342 
   4343 	if (sc->sc_type == WM_T_80003) {
   4344 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4345 		reg &= ~TCTL_EXT_GCEX_MASK;
   4346 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4347 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4348 	}
   4349 
   4350 	/* Set the media. */
   4351 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4352 		goto out;
   4353 
   4354 	/* Configure for OS presence */
   4355 	wm_init_manageability(sc);
   4356 
   4357 	/*
   4358 	 * Set up the receive control register; we actually program
   4359 	 * the register when we set the receive filter.  Use multicast
   4360 	 * address offset type 0.
   4361 	 *
   4362 	 * Only the i82544 has the ability to strip the incoming
   4363 	 * CRC, so we don't enable that feature.
   4364 	 */
   4365 	sc->sc_mchash_type = 0;
   4366 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4367 	    | RCTL_MO(sc->sc_mchash_type);
   4368 
   4369 	/*
   4370 	 * The I350 has a bug where it always strips the CRC whether
    4371 	 * asked to or not.  So ask for stripped CRC here and cope in rxeof.
   4372 	 */
   4373 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4374 	    || (sc->sc_type == WM_T_I210))
   4375 		sc->sc_rctl |= RCTL_SECRC;
   4376 
   4377 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4378 	    && (ifp->if_mtu > ETHERMTU)) {
   4379 		sc->sc_rctl |= RCTL_LPE;
   4380 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4381 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4382 	}
   4383 
   4384 	if (MCLBYTES == 2048) {
   4385 		sc->sc_rctl |= RCTL_2k;
   4386 	} else {
   4387 		if (sc->sc_type >= WM_T_82543) {
   4388 			switch (MCLBYTES) {
   4389 			case 4096:
   4390 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4391 				break;
   4392 			case 8192:
   4393 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4394 				break;
   4395 			case 16384:
   4396 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4397 				break;
   4398 			default:
   4399 				panic("wm_init: MCLBYTES %d unsupported",
   4400 				    MCLBYTES);
   4401 				break;
   4402 			}
    4403 		} else
         			panic("wm_init: i82542 requires MCLBYTES = 2048");
   4404 	}
   4405 
   4406 	/* Set the receive filter. */
   4407 	wm_set_filter(sc);
   4408 
   4409 	/* Enable ECC */
   4410 	switch (sc->sc_type) {
   4411 	case WM_T_82571:
   4412 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4413 		reg |= PBA_ECC_CORR_EN;
   4414 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4415 		break;
   4416 	case WM_T_PCH_LPT:
   4417 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4418 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4419 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4420 
   4421 		reg = CSR_READ(sc, WMREG_CTRL);
   4422 		reg |= CTRL_MEHE;
   4423 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4424 		break;
   4425 	default:
   4426 		break;
   4427 	}
   4428 
   4429 	/* On 575 and later set RDT only if RX enabled */
   4430 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4431 		for (i = 0; i < WM_NRXDESC; i++)
   4432 			WM_INIT_RXDESC(sc, i);
   4433 
   4434 	sc->sc_stopping = false;
   4435 
   4436 	/* Start the one second link check clock. */
   4437 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4438 
   4439 	/* ...all done! */
   4440 	ifp->if_flags |= IFF_RUNNING;
   4441 	ifp->if_flags &= ~IFF_OACTIVE;
   4442 
   4443  out:
   4444 	sc->sc_if_flags = ifp->if_flags;
   4445 	if (error)
   4446 		log(LOG_ERR, "%s: interface not running\n",
   4447 		    device_xname(sc->sc_dev));
   4448 	return error;
   4449 }
   4450 
   4451 /*
   4452  * wm_stop:		[ifnet interface function]
   4453  *
   4454  *	Stop transmission on the interface.
   4455  */
   4456 static void
   4457 wm_stop(struct ifnet *ifp, int disable)
   4458 {
   4459 	struct wm_softc *sc = ifp->if_softc;
   4460 
   4461 	WM_BOTH_LOCK(sc);
   4462 	wm_stop_locked(ifp, disable);
   4463 	WM_BOTH_UNLOCK(sc);
   4464 }
   4465 
   4466 static void
   4467 wm_stop_locked(struct ifnet *ifp, int disable)
   4468 {
   4469 	struct wm_softc *sc = ifp->if_softc;
   4470 	struct wm_txsoft *txs;
   4471 	int i;
   4472 
   4473 	KASSERT(WM_BOTH_LOCKED(sc));
   4474 
   4475 	sc->sc_stopping = true;
   4476 
   4477 	/* Stop the one second clock. */
   4478 	callout_stop(&sc->sc_tick_ch);
   4479 
   4480 	/* Stop the 82547 Tx FIFO stall check timer. */
   4481 	if (sc->sc_type == WM_T_82547)
   4482 		callout_stop(&sc->sc_txfifo_ch);
   4483 
   4484 	if (sc->sc_flags & WM_F_HAS_MII) {
   4485 		/* Down the MII. */
   4486 		mii_down(&sc->sc_mii);
   4487 	} else {
   4488 #if 0
   4489 		/* Should we clear PHY's status properly? */
   4490 		wm_reset(sc);
   4491 #endif
   4492 	}
   4493 
   4494 	/* Stop the transmit and receive processes. */
   4495 	CSR_WRITE(sc, WMREG_TCTL, 0);
   4496 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4497 	sc->sc_rctl &= ~RCTL_EN;
   4498 
   4499 	/*
   4500 	 * Clear the interrupt mask to ensure the device cannot assert its
   4501 	 * interrupt line.
   4502 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
   4503 	 * any currently pending or shared interrupt.
   4504 	 */
   4505 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4506 	sc->sc_icr = 0;
   4507 
   4508 	/* Release any queued transmit buffers. */
   4509 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   4510 		txs = &sc->sc_txsoft[i];
   4511 		if (txs->txs_mbuf != NULL) {
   4512 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   4513 			m_freem(txs->txs_mbuf);
   4514 			txs->txs_mbuf = NULL;
   4515 		}
   4516 	}
   4517 
   4518 	/* Mark the interface as down and cancel the watchdog timer. */
   4519 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   4520 	ifp->if_timer = 0;
   4521 
   4522 	if (disable)
   4523 		wm_rxdrain(sc);
   4524 
   4525 #if 0 /* notyet */
   4526 	if (sc->sc_type >= WM_T_82544)
   4527 		CSR_WRITE(sc, WMREG_WUC, 0);
   4528 #endif
   4529 }
   4530 
   4531 /*
   4532  * wm_tx_offload:
   4533  *
   4534  *	Set up TCP/IP checksumming parameters for the
   4535  *	specified packet.
   4536  */
   4537 static int
   4538 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   4539     uint8_t *fieldsp)
   4540 {
   4541 	struct mbuf *m0 = txs->txs_mbuf;
   4542 	struct livengood_tcpip_ctxdesc *t;
   4543 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   4544 	uint32_t ipcse;
   4545 	struct ether_header *eh;
   4546 	int offset, iphl;
   4547 	uint8_t fields;
   4548 
   4549 	/*
   4550 	 * XXX It would be nice if the mbuf pkthdr had offset
   4551 	 * fields for the protocol headers.
   4552 	 */
   4553 
   4554 	eh = mtod(m0, struct ether_header *);
   4555 	switch (htons(eh->ether_type)) {
   4556 	case ETHERTYPE_IP:
   4557 	case ETHERTYPE_IPV6:
   4558 		offset = ETHER_HDR_LEN;
   4559 		break;
   4560 
   4561 	case ETHERTYPE_VLAN:
   4562 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4563 		break;
   4564 
   4565 	default:
   4566 		/*
   4567 		 * Don't support this protocol or encapsulation.
   4568 		 */
   4569 		*fieldsp = 0;
   4570 		*cmdp = 0;
   4571 		return 0;
   4572 	}
   4573 
   4574 	if ((m0->m_pkthdr.csum_flags &
   4575 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   4576 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4577 	} else {
   4578 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4579 	}
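         	/* IPCSE is the inclusive offset of the last IP header byte. */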
   4580 	ipcse = offset + iphl - 1;
   4581 
   4582 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   4583 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   4584 	seg = 0;
   4585 	fields = 0;
   4586 
   4587 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4588 		int hlen = offset + iphl;
   4589 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4590 
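         		/*
         		 * For TSO the hardware rewrites the IP and TCP headers
         		 * of every segment, so the length fields must be zeroed
         		 * and th_sum preloaded with a pseudo-header checksum
         		 * that omits the length.
         		 */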
   4591 		if (__predict_false(m0->m_len <
   4592 				    (hlen + sizeof(struct tcphdr)))) {
   4593 			/*
   4594 			 * TCP/IP headers are not in the first mbuf; we need
   4595 			 * to do this the slow and painful way.  Let's just
   4596 			 * hope this doesn't happen very often.
   4597 			 */
   4598 			struct tcphdr th;
   4599 
   4600 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4601 
   4602 			m_copydata(m0, hlen, sizeof(th), &th);
   4603 			if (v4) {
   4604 				struct ip ip;
   4605 
   4606 				m_copydata(m0, offset, sizeof(ip), &ip);
   4607 				ip.ip_len = 0;
   4608 				m_copyback(m0,
   4609 				    offset + offsetof(struct ip, ip_len),
   4610 				    sizeof(ip.ip_len), &ip.ip_len);
   4611 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4612 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4613 			} else {
   4614 				struct ip6_hdr ip6;
   4615 
   4616 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4617 				ip6.ip6_plen = 0;
   4618 				m_copyback(m0,
   4619 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4620 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4621 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4622 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4623 			}
   4624 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4625 			    sizeof(th.th_sum), &th.th_sum);
   4626 
   4627 			hlen += th.th_off << 2;
   4628 		} else {
   4629 			/*
   4630 			 * TCP/IP headers are in the first mbuf; we can do
   4631 			 * this the easy way.
   4632 			 */
   4633 			struct tcphdr *th;
   4634 
   4635 			if (v4) {
   4636 				struct ip *ip =
   4637 				    (void *)(mtod(m0, char *) + offset);
   4638 				th = (void *)(mtod(m0, char *) + hlen);
   4639 
   4640 				ip->ip_len = 0;
   4641 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4642 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4643 			} else {
   4644 				struct ip6_hdr *ip6 =
   4645 				    (void *)(mtod(m0, char *) + offset);
   4646 				th = (void *)(mtod(m0, char *) + hlen);
   4647 
   4648 				ip6->ip6_plen = 0;
   4649 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4650 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4651 			}
   4652 			hlen += th->th_off << 2;
   4653 		}
   4654 
   4655 		if (v4) {
   4656 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4657 			cmdlen |= WTX_TCPIP_CMD_IP;
   4658 		} else {
   4659 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4660 			ipcse = 0;
   4661 		}
   4662 		cmd |= WTX_TCPIP_CMD_TSE;
   4663 		cmdlen |= WTX_TCPIP_CMD_TSE |
   4664 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   4665 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   4666 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   4667 	}
   4668 
   4669 	/*
   4670 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   4671 	 * offload feature, if we load the context descriptor, we
   4672 	 * MUST provide valid values for IPCSS and TUCSS fields.
   4673 	 */
   4674 
   4675 	ipcs = WTX_TCPIP_IPCSS(offset) |
   4676 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   4677 	    WTX_TCPIP_IPCSE(ipcse);
   4678 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   4679 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   4680 		fields |= WTX_IXSM;
   4681 	}
   4682 
   4683 	offset += iphl;
   4684 
   4685 	if (m0->m_pkthdr.csum_flags &
   4686 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   4687 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4688 		fields |= WTX_TXSM;
   4689 		tucs = WTX_TCPIP_TUCSS(offset) |
   4690 		    WTX_TCPIP_TUCSO(offset +
   4691 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   4692 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4693 	} else if ((m0->m_pkthdr.csum_flags &
   4694 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   4695 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4696 		fields |= WTX_TXSM;
   4697 		tucs = WTX_TCPIP_TUCSS(offset) |
   4698 		    WTX_TCPIP_TUCSO(offset +
   4699 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   4700 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4701 	} else {
   4702 		/* Just initialize it to a valid TCP context. */
   4703 		tucs = WTX_TCPIP_TUCSS(offset) |
   4704 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   4705 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4706 	}
   4707 
   4708 	/* Fill in the context descriptor. */
   4709 	t = (struct livengood_tcpip_ctxdesc *)
   4710 	    &sc->sc_txdescs[sc->sc_txnext];
   4711 	t->tcpip_ipcs = htole32(ipcs);
   4712 	t->tcpip_tucs = htole32(tucs);
   4713 	t->tcpip_cmdlen = htole32(cmdlen);
   4714 	t->tcpip_seg = htole32(seg);
   4715 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   4716 
   4717 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   4718 	txs->txs_ndesc++;
   4719 
   4720 	*cmdp = cmd;
   4721 	*fieldsp = fields;
   4722 
   4723 	return 0;
   4724 }
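        /*
         * Worked example of the offsets computed above (a sketch of the
         * arithmetic, not from the i8254x documentation): for an untagged
         * IPv4 TCP packet with a 20-byte IP header, offset = ETHER_HDR_LEN
         * = 14 and iphl = 20, so IPCSS = 14, IPCSO = 14 +
         * offsetof(struct ip, ip_sum) = 24, and IPCSE = 14 + 20 - 1 = 33.
         * After "offset += iphl", TUCSS = 34 and TUCSO = 34 +
         * offsetof(struct tcphdr, th_sum) = 50, with TUCSE = 0 meaning
         * "checksum through the end of the packet".
         */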
   4725 
   4726 static void
   4727 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   4728 {
   4729 	struct mbuf *m;
   4730 	int i;
   4731 
   4732 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   4733 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   4734 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   4735 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   4736 		    m->m_data, m->m_len, m->m_flags);
   4737 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   4738 	    i, i == 1 ? "" : "s");
   4739 }
   4740 
   4741 /*
   4742  * wm_82547_txfifo_stall:
   4743  *
   4744  *	Callout used to wait for the 82547 Tx FIFO to drain,
   4745  *	reset the FIFO pointers, and restart packet transmission.
   4746  */
   4747 static void
   4748 wm_82547_txfifo_stall(void *arg)
   4749 {
   4750 	struct wm_softc *sc = arg;
   4751 #ifndef WM_MPSAFE
   4752 	int s;
   4753 
   4754 	s = splnet();
   4755 #endif
   4756 	WM_TX_LOCK(sc);
   4757 
   4758 	if (sc->sc_stopping)
   4759 		goto out;
   4760 
   4761 	if (sc->sc_txfifo_stall) {
   4762 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
   4763 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   4764 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   4765 			/*
   4766 			 * Packets have drained.  Stop transmitter, reset
   4767 			 * FIFO pointers, restart transmitter, and kick
   4768 			 * the packet queue.
   4769 			 */
   4770 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   4771 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   4772 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
   4773 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
   4774 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
   4775 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
   4776 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   4777 			CSR_WRITE_FLUSH(sc);
   4778 
   4779 			sc->sc_txfifo_head = 0;
   4780 			sc->sc_txfifo_stall = 0;
   4781 			wm_start_locked(&sc->sc_ethercom.ec_if);
   4782 		} else {
   4783 			/*
   4784 			 * Still waiting for packets to drain; try again in
   4785 			 * another tick.
   4786 			 */
   4787 			callout_schedule(&sc->sc_txfifo_ch, 1);
   4788 		}
   4789 	}
   4790 
   4791 out:
   4792 	WM_TX_UNLOCK(sc);
   4793 #ifndef WM_MPSAFE
   4794 	splx(s);
   4795 #endif
   4796 }
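        /*
         * A note on the drain check above (our reading, not chip
         * documentation): the descriptor ring is empty when TDT == TDH,
         * and the internal packet FIFO has drained when the TDFT/TDFH
         * and TDFTS/TDFHS pointer pairs match; only then is it safe to
         * rewind all four FIFO pointers to sc_txfifo_addr.
         */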
   4797 
   4798 /*
   4799  * wm_82547_txfifo_bugchk:
   4800  *
   4801  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   4802  *	prevent enqueueing a packet that would wrap around the end
   4803  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   4804  *
   4805  *	We do this by checking the amount of space before the end
   4806  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   4807  *	the Tx FIFO, wait for all remaining packets to drain, reset
   4808  *	the internal FIFO pointers to the beginning, and restart
   4809  *	transmission on the interface.
   4810  */
   4811 #define	WM_FIFO_HDR		0x10
   4812 #define	WM_82547_PAD_LEN	0x3e0
   4813 static int
   4814 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   4815 {
   4816 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
   4817 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   4818 
   4819 	/* Just return if already stalled. */
   4820 	if (sc->sc_txfifo_stall)
   4821 		return 1;
   4822 
   4823 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   4824 		/* Stall only occurs in half-duplex mode. */
   4825 		goto send_packet;
   4826 	}
   4827 
   4828 	if (len >= WM_82547_PAD_LEN + space) {
   4829 		sc->sc_txfifo_stall = 1;
   4830 		callout_schedule(&sc->sc_txfifo_ch, 1);
   4831 		return 1;
   4832 	}
   4833 
   4834  send_packet:
   4835 	sc->sc_txfifo_head += len;
   4836 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
   4837 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
   4838 
   4839 	return 0;
   4840 }
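        /*
         * A standalone sketch of the accounting above (hypothetical
         * helper, not part of the driver).  For example, with an 8KB
         * FIFO (fifosize = 0x2000) and head = 0x1f00, space = 0x100;
         * a 1514-byte frame rounds up to len = 0x600, which is >=
         * WM_82547_PAD_LEN + space = 0x4e0, so the frame would wrap
         * and we must stall instead.
         */
        #if 0
        static bool
        wm_82547_would_wrap(int fifosize, int head, int pktlen)
        {
        	int space = fifosize - head;
        	int len = roundup(pktlen + WM_FIFO_HDR, WM_FIFO_HDR);

        	/* True when the padded frame would cross the FIFO end. */
        	return (len >= WM_82547_PAD_LEN + space);
        }
        #endif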
   4841 
   4842 /*
   4843  * wm_start:		[ifnet interface function]
   4844  *
   4845  *	Start packet transmission on the interface.
   4846  */
   4847 static void
   4848 wm_start(struct ifnet *ifp)
   4849 {
   4850 	struct wm_softc *sc = ifp->if_softc;
   4851 
   4852 	WM_TX_LOCK(sc);
   4853 	if (!sc->sc_stopping)
   4854 		wm_start_locked(ifp);
   4855 	WM_TX_UNLOCK(sc);
   4856 }
   4857 
   4858 static void
   4859 wm_start_locked(struct ifnet *ifp)
   4860 {
   4861 	struct wm_softc *sc = ifp->if_softc;
   4862 	struct mbuf *m0;
   4863 	struct m_tag *mtag;
   4864 	struct wm_txsoft *txs;
   4865 	bus_dmamap_t dmamap;
   4866 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   4867 	bus_addr_t curaddr;
   4868 	bus_size_t seglen, curlen;
   4869 	uint32_t cksumcmd;
   4870 	uint8_t cksumfields;
   4871 
   4872 	KASSERT(WM_TX_LOCKED(sc));
   4873 
   4874 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   4875 		return;
   4876 
   4877 	/* Remember the previous number of free descriptors. */
   4878 	ofree = sc->sc_txfree;
   4879 
   4880 	/*
   4881 	 * Loop through the send queue, setting up transmit descriptors
   4882 	 * until we drain the queue, or use up all available transmit
   4883 	 * descriptors.
   4884 	 */
   4885 	for (;;) {
   4886 		m0 = NULL;
   4887 
   4888 		/* Get a work queue entry. */
   4889 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   4890 			wm_txintr(sc);
   4891 			if (sc->sc_txsfree == 0) {
   4892 				DPRINTF(WM_DEBUG_TX,
   4893 				    ("%s: TX: no free job descriptors\n",
   4894 					device_xname(sc->sc_dev)));
   4895 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   4896 				break;
   4897 			}
   4898 		}
   4899 
   4900 		/* Grab a packet off the queue. */
   4901 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   4902 		if (m0 == NULL)
   4903 			break;
   4904 
   4905 		DPRINTF(WM_DEBUG_TX,
   4906 		    ("%s: TX: have packet to transmit: %p\n",
   4907 		    device_xname(sc->sc_dev), m0));
   4908 
   4909 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   4910 		dmamap = txs->txs_dmamap;
   4911 
   4912 		use_tso = (m0->m_pkthdr.csum_flags &
   4913 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   4914 
   4915 		/*
   4916 		 * So says the Linux driver:
   4917 		 * The controller does a simple calculation to make sure
   4918 		 * there is enough room in the FIFO before initiating the
   4919 		 * DMA for each buffer.  The calc is:
   4920 		 *	4 = ceil(buffer len / MSS)
   4921 		 * To make sure we don't overrun the FIFO, adjust the max
   4922 		 * buffer len if the MSS drops.
   4923 		 */
   4924 		dmamap->dm_maxsegsz =
   4925 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   4926 		    ? m0->m_pkthdr.segsz << 2
   4927 		    : WTX_MAX_LEN;
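        		/*
        		 * For example (a sketch): with TSO and segsz = 1448, DMA
        		 * segments are capped at 4 * 1448 = 5792 bytes (if that
        		 * is below WTX_MAX_LEN); with a small MSS of 576 the cap
        		 * drops to 2304, keeping ceil(segment len / MSS) <= 4 as
        		 * the note above requires.
        		 */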
   4928 
   4929 		/*
   4930 		 * Load the DMA map.  If this fails, the packet either
   4931 		 * didn't fit in the allotted number of segments, or we
   4932 		 * were short on resources.  For the too-many-segments
   4933 		 * case, we simply report an error and drop the packet,
   4934 		 * since we can't sanely copy a jumbo packet to a single
   4935 		 * buffer.
   4936 		 */
   4937 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   4938 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   4939 		if (error) {
   4940 			if (error == EFBIG) {
   4941 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   4942 				log(LOG_ERR, "%s: Tx packet consumes too many "
   4943 				    "DMA segments, dropping...\n",
   4944 				    device_xname(sc->sc_dev));
   4945 				wm_dump_mbuf_chain(sc, m0);
   4946 				m_freem(m0);
   4947 				continue;
   4948 			}
   4949 			/* Short on resources, just stop for now. */
   4950 			DPRINTF(WM_DEBUG_TX,
   4951 			    ("%s: TX: dmamap load failed: %d\n",
   4952 			    device_xname(sc->sc_dev), error));
   4953 			break;
   4954 		}
   4955 
   4956 		segs_needed = dmamap->dm_nsegs;
   4957 		if (use_tso) {
   4958 			/* For sentinel descriptor; see below. */
   4959 			segs_needed++;
   4960 		}
   4961 
   4962 		/*
   4963 		 * Ensure we have enough descriptors free to describe
   4964 		 * the packet.  Note, we always reserve one descriptor
   4965 		 * at the end of the ring due to the semantics of the
   4966 		 * TDT register, plus one more in the event we need
   4967 		 * to load offload context.
   4968 		 */
   4969 		if (segs_needed > sc->sc_txfree - 2) {
   4970 			/*
   4971 			 * Not enough free descriptors to transmit this
   4972 			 * packet.  We haven't committed anything yet,
   4973 			 * so just unload the DMA map, put the packet
   4974 			 * pack on the queue, and punt.  Notify the upper
   4975 			 * back on the queue, and punt.  Notify the upper
   4976 			 */
   4977 			DPRINTF(WM_DEBUG_TX,
   4978 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   4979 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   4980 			    segs_needed, sc->sc_txfree - 1));
   4981 			ifp->if_flags |= IFF_OACTIVE;
   4982 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4983 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   4984 			break;
   4985 		}
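        		/*
        		 * Example of the budget above (a sketch): with
        		 * sc_txfree = 10, a packet needing 9 segments fails the
        		 * test (9 > 10 - 2), because one descriptor is reserved
        		 * for the TDT semantics and one for a possible context
        		 * descriptor.
        		 */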
   4986 
   4987 		/*
   4988 		 * Check for 82547 Tx FIFO bug.  We need to do this
   4989 		 * once we know we can transmit the packet, since we
   4990 		 * do some internal FIFO space accounting here.
   4991 		 */
   4992 		if (sc->sc_type == WM_T_82547 &&
   4993 		    wm_82547_txfifo_bugchk(sc, m0)) {
   4994 			DPRINTF(WM_DEBUG_TX,
   4995 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   4996 			    device_xname(sc->sc_dev)));
   4997 			ifp->if_flags |= IFF_OACTIVE;
   4998 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4999 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   5000 			break;
   5001 		}
   5002 
   5003 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   5004 
   5005 		DPRINTF(WM_DEBUG_TX,
   5006 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   5007 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   5008 
   5009 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   5010 
   5011 		/*
   5012 		 * Store a pointer to the packet so that we can free it
   5013 		 * later.
   5014 		 *
   5015 		 * Initially, we consider the number of descriptors the
   5016 		 * packet uses to be the number of DMA segments.  This may be
   5017 		 * incremented by 1 if we do checksum offload (a descriptor
   5018 		 * is used to set the checksum context).
   5019 		 */
   5020 		txs->txs_mbuf = m0;
   5021 		txs->txs_firstdesc = sc->sc_txnext;
   5022 		txs->txs_ndesc = segs_needed;
   5023 
   5024 		/* Set up offload parameters for this packet. */
   5025 		if (m0->m_pkthdr.csum_flags &
   5026 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   5027 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   5028 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   5029 			if (wm_tx_offload(sc, txs, &cksumcmd,
   5030 					  &cksumfields) != 0) {
   5031 				/* Error message already displayed. */
   5032 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   5033 				continue;
   5034 			}
   5035 		} else {
   5036 			cksumcmd = 0;
   5037 			cksumfields = 0;
   5038 		}
   5039 
   5040 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   5041 
   5042 		/* Sync the DMA map. */
   5043 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   5044 		    BUS_DMASYNC_PREWRITE);
   5045 
   5046 		/* Initialize the transmit descriptor. */
   5047 		for (nexttx = sc->sc_txnext, seg = 0;
   5048 		     seg < dmamap->dm_nsegs; seg++) {
   5049 			for (seglen = dmamap->dm_segs[seg].ds_len,
   5050 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   5051 			     seglen != 0;
   5052 			     curaddr += curlen, seglen -= curlen,
   5053 			     nexttx = WM_NEXTTX(sc, nexttx)) {
   5054 				curlen = seglen;
   5055 
   5056 				/*
   5057 				 * So says the Linux driver:
   5058 				 * Work around for premature descriptor
   5059 				 * write-backs in TSO mode.  Append a
   5060 				 * 4-byte sentinel descriptor.
   5061 				 */
   5062 				if (use_tso &&
   5063 				    seg == dmamap->dm_nsegs - 1 &&
   5064 				    curlen > 8)
   5065 					curlen -= 4;
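        				/*
        				 * E.g. (a sketch): a final 1024-byte
        				 * segment is emitted as a 1020-byte
        				 * descriptor plus a 4-byte sentinel on
        				 * the next loop pass; this tail is why
        				 * segs_needed was incremented for TSO.
        				 */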
   5066 
   5067 				wm_set_dma_addr(
   5068 				    &sc->sc_txdescs[nexttx].wtx_addr,
   5069 				    curaddr);
   5070 				sc->sc_txdescs[nexttx].wtx_cmdlen =
   5071 				    htole32(cksumcmd | curlen);
   5072 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
   5073 				    0;
   5074 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
   5075 				    cksumfields;
   5076 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   5077 				lasttx = nexttx;
   5078 
   5079 				DPRINTF(WM_DEBUG_TX,
   5080 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   5081 				     "len %#04zx\n",
   5082 				    device_xname(sc->sc_dev), nexttx,
   5083 				    (uint64_t)curaddr, curlen));
   5084 			}
   5085 		}
   5086 
   5087 		KASSERT(lasttx != -1);
   5088 
   5089 		/*
   5090 		 * Set up the command byte on the last descriptor of
   5091 		 * the packet.  If we're in the interrupt delay window,
   5092 		 * delay the interrupt.
   5093 		 */
   5094 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   5095 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   5096 
   5097 		/*
   5098 		 * If VLANs are enabled and the packet has a VLAN tag, set
   5099 		 * up the descriptor to encapsulate the packet for us.
   5100 		 *
   5101 		 * This is only valid on the last descriptor of the packet.
   5102 		 */
   5103 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   5104 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   5105 			    htole32(WTX_CMD_VLE);
   5106 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
   5107 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   5108 		}
   5109 
   5110 		txs->txs_lastdesc = lasttx;
   5111 
   5112 		DPRINTF(WM_DEBUG_TX,
   5113 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   5114 		    device_xname(sc->sc_dev),
   5115 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   5116 
   5117 		/* Sync the descriptors we're using. */
   5118 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   5119 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5120 
   5121 		/* Give the packet to the chip. */
   5122 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   5123 
   5124 		DPRINTF(WM_DEBUG_TX,
   5125 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   5126 
   5127 		DPRINTF(WM_DEBUG_TX,
   5128 		    ("%s: TX: finished transmitting packet, job %d\n",
   5129 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   5130 
   5131 		/* Advance the tx pointer. */
   5132 		sc->sc_txfree -= txs->txs_ndesc;
   5133 		sc->sc_txnext = nexttx;
   5134 
   5135 		sc->sc_txsfree--;
   5136 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   5137 
   5138 		/* Pass the packet to any BPF listeners. */
   5139 		bpf_mtap(ifp, m0);
   5140 	}
   5141 
   5142 	if (m0 != NULL) {
   5143 		ifp->if_flags |= IFF_OACTIVE;
   5144 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5145 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   5146 		m_freem(m0);
   5147 	}
   5148 
   5149 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   5150 		/* No more slots; notify upper layer. */
   5151 		ifp->if_flags |= IFF_OACTIVE;
   5152 	}
   5153 
   5154 	if (sc->sc_txfree != ofree) {
   5155 		/* Set a watchdog timer in case the chip flakes out. */
   5156 		ifp->if_timer = 5;
   5157 	}
   5158 }
   5159 
   5160 /*
   5161  * wm_nq_tx_offload:
   5162  *
   5163  *	Set up TCP/IP checksumming parameters for the
   5164  *	specified packet, for NEWQUEUE devices
   5165  */
   5166 static int
   5167 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   5168     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   5169 {
   5170 	struct mbuf *m0 = txs->txs_mbuf;
   5171 	struct m_tag *mtag;
   5172 	uint32_t vl_len, mssidx, cmdc;
   5173 	struct ether_header *eh;
   5174 	int offset, iphl;
   5175 
   5176 	/*
   5177 	 * XXX It would be nice if the mbuf pkthdr had offset
   5178 	 * fields for the protocol headers.
   5179 	 */
   5180 	*cmdlenp = 0;
   5181 	*fieldsp = 0;
   5182 
   5183 	eh = mtod(m0, struct ether_header *);
   5184 	switch (htons(eh->ether_type)) {
   5185 	case ETHERTYPE_IP:
   5186 	case ETHERTYPE_IPV6:
   5187 		offset = ETHER_HDR_LEN;
   5188 		break;
   5189 
   5190 	case ETHERTYPE_VLAN:
   5191 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5192 		break;
   5193 
   5194 	default:
   5195 		/* Don't support this protocol or encapsulation. */
   5196 		*do_csum = false;
   5197 		return 0;
   5198 	}
   5199 	*do_csum = true;
   5200 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   5201 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   5202 
   5203 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   5204 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   5205 
   5206 	if ((m0->m_pkthdr.csum_flags &
   5207 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   5208 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5209 	} else {
   5210 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5211 	}
   5212 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   5213 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   5214 
   5215 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   5216 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   5217 		     << NQTXC_VLLEN_VLAN_SHIFT);
   5218 		*cmdlenp |= NQTX_CMD_VLE;
   5219 	}
   5220 
   5221 	mssidx = 0;
   5222 
   5223 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5224 		int hlen = offset + iphl;
   5225 		int tcp_hlen;
   5226 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5227 
   5228 		if (__predict_false(m0->m_len <
   5229 				    (hlen + sizeof(struct tcphdr)))) {
   5230 			/*
   5231 			 * TCP/IP headers are not in the first mbuf; we need
   5232 			 * to do this the slow and painful way.  Let's just
   5233 			 * hope this doesn't happen very often.
   5234 			 */
   5235 			struct tcphdr th;
   5236 
   5237 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   5238 
   5239 			m_copydata(m0, hlen, sizeof(th), &th);
   5240 			if (v4) {
   5241 				struct ip ip;
   5242 
   5243 				m_copydata(m0, offset, sizeof(ip), &ip);
   5244 				ip.ip_len = 0;
   5245 				m_copyback(m0,
   5246 				    offset + offsetof(struct ip, ip_len),
   5247 				    sizeof(ip.ip_len), &ip.ip_len);
   5248 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5249 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5250 			} else {
   5251 				struct ip6_hdr ip6;
   5252 
   5253 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   5254 				ip6.ip6_plen = 0;
   5255 				m_copyback(m0,
   5256 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   5257 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   5258 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   5259 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   5260 			}
   5261 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   5262 			    sizeof(th.th_sum), &th.th_sum);
   5263 
   5264 			tcp_hlen = th.th_off << 2;
   5265 		} else {
   5266 			/*
   5267 			 * TCP/IP headers are in the first mbuf; we can do
   5268 			 * this the easy way.
   5269 			 */
   5270 			struct tcphdr *th;
   5271 
   5272 			if (v4) {
   5273 				struct ip *ip =
   5274 				    (void *)(mtod(m0, char *) + offset);
   5275 				th = (void *)(mtod(m0, char *) + hlen);
   5276 
   5277 				ip->ip_len = 0;
   5278 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   5279 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   5280 			} else {
   5281 				struct ip6_hdr *ip6 =
   5282 				    (void *)(mtod(m0, char *) + offset);
   5283 				th = (void *)(mtod(m0, char *) + hlen);
   5284 
   5285 				ip6->ip6_plen = 0;
   5286 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   5287 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   5288 			}
   5289 			tcp_hlen = th->th_off << 2;
   5290 		}
   5291 		hlen += tcp_hlen;
   5292 		*cmdlenp |= NQTX_CMD_TSE;
   5293 
   5294 		if (v4) {
   5295 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   5296 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   5297 		} else {
   5298 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   5299 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   5300 		}
   5301 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   5302 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   5303 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   5304 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   5305 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   5306 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   5307 	} else {
   5308 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   5309 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   5310 	}
   5311 
   5312 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   5313 		*fieldsp |= NQTXD_FIELDS_IXSM;
   5314 		cmdc |= NQTXC_CMD_IP4;
   5315 	}
   5316 
   5317 	if (m0->m_pkthdr.csum_flags &
   5318 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   5319 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   5320 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   5321 			cmdc |= NQTXC_CMD_TCP;
   5322 		} else {
   5323 			cmdc |= NQTXC_CMD_UDP;
   5324 		}
   5325 		cmdc |= NQTXC_CMD_IP4;
   5326 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   5327 	}
   5328 	if (m0->m_pkthdr.csum_flags &
   5329 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   5330 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   5331 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   5332 			cmdc |= NQTXC_CMD_TCP;
   5333 		} else {
   5334 			cmdc |= NQTXC_CMD_UDP;
   5335 		}
   5336 		cmdc |= NQTXC_CMD_IP6;
   5337 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   5338 	}
   5339 
   5340 	/* Fill in the context descriptor. */
   5341 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
   5342 	    htole32(vl_len);
   5343 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
   5344 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
   5345 	    htole32(cmdc);
   5346 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
   5347 	    htole32(mssidx);
   5348 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   5349 	DPRINTF(WM_DEBUG_TX,
   5350 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   5351 	    sc->sc_txnext, 0, vl_len));
   5352 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   5353 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   5354 	txs->txs_ndesc++;
   5355 	return 0;
   5356 }
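        /*
         * Worked example of the packing above (a sketch): for an untagged
         * IPv4 TCP packet with a 20-byte IP header, vl_len carries MACLEN
         * = 14 and IPLEN = 20 in their respective fields; with TSO at
         * segsz = 1448 and a 20-byte TCP header, mssidx carries MSS =
         * 1448 and L4LEN = 20, and the advanced data descriptors get
         * PAYLEN = m_pkthdr.len - 54.
         */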
   5357 
   5358 /*
   5359  * wm_nq_start:		[ifnet interface function]
   5360  *
   5361  *	Start packet transmission on the interface for NEWQUEUE devices
   5362  */
   5363 static void
   5364 wm_nq_start(struct ifnet *ifp)
   5365 {
   5366 	struct wm_softc *sc = ifp->if_softc;
   5367 
   5368 	WM_TX_LOCK(sc);
   5369 	if (!sc->sc_stopping)
   5370 		wm_nq_start_locked(ifp);
   5371 	WM_TX_UNLOCK(sc);
   5372 }
   5373 
   5374 static void
   5375 wm_nq_start_locked(struct ifnet *ifp)
   5376 {
   5377 	struct wm_softc *sc = ifp->if_softc;
   5378 	struct mbuf *m0;
   5379 	struct m_tag *mtag;
   5380 	struct wm_txsoft *txs;
   5381 	bus_dmamap_t dmamap;
   5382 	int error, nexttx, lasttx = -1, seg, segs_needed;
   5383 	bool do_csum, sent;
   5384 
   5385 	KASSERT(WM_TX_LOCKED(sc));
   5386 
   5387 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   5388 		return;
   5389 
   5390 	sent = false;
   5391 
   5392 	/*
   5393 	 * Loop through the send queue, setting up transmit descriptors
   5394 	 * until we drain the queue, or use up all available transmit
   5395 	 * descriptors.
   5396 	 */
   5397 	for (;;) {
   5398 		m0 = NULL;
   5399 
   5400 		/* Get a work queue entry. */
   5401 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   5402 			wm_txintr(sc);
   5403 			if (sc->sc_txsfree == 0) {
   5404 				DPRINTF(WM_DEBUG_TX,
   5405 				    ("%s: TX: no free job descriptors\n",
   5406 					device_xname(sc->sc_dev)));
   5407 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   5408 				break;
   5409 			}
   5410 		}
   5411 
   5412 		/* Grab a packet off the queue. */
   5413 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   5414 		if (m0 == NULL)
   5415 			break;
   5416 
   5417 		DPRINTF(WM_DEBUG_TX,
   5418 		    ("%s: TX: have packet to transmit: %p\n",
   5419 		    device_xname(sc->sc_dev), m0));
   5420 
   5421 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   5422 		dmamap = txs->txs_dmamap;
   5423 
   5424 		/*
   5425 		 * Load the DMA map.  If this fails, the packet either
   5426 		 * didn't fit in the allotted number of segments, or we
   5427 		 * were short on resources.  For the too-many-segments
   5428 		 * case, we simply report an error and drop the packet,
   5429 		 * since we can't sanely copy a jumbo packet to a single
   5430 		 * buffer.
   5431 		 */
   5432 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   5433 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   5434 		if (error) {
   5435 			if (error == EFBIG) {
   5436 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5437 				log(LOG_ERR, "%s: Tx packet consumes too many "
   5438 				    "DMA segments, dropping...\n",
   5439 				    device_xname(sc->sc_dev));
   5440 				wm_dump_mbuf_chain(sc, m0);
   5441 				m_freem(m0);
   5442 				continue;
   5443 			}
   5444 			/* Short on resources, just stop for now. */
   5445 			DPRINTF(WM_DEBUG_TX,
   5446 			    ("%s: TX: dmamap load failed: %d\n",
   5447 			    device_xname(sc->sc_dev), error));
   5448 			break;
   5449 		}
   5450 
   5451 		segs_needed = dmamap->dm_nsegs;
   5452 
   5453 		/*
   5454 		 * Ensure we have enough descriptors free to describe
   5455 		 * the packet.  Note, we always reserve one descriptor
   5456 		 * at the end of the ring due to the semantics of the
   5457 		 * TDT register, plus one more in the event we need
   5458 		 * to load offload context.
   5459 		 */
   5460 		if (segs_needed > sc->sc_txfree - 2) {
   5461 			/*
   5462 			 * Not enough free descriptors to transmit this
   5463 			 * packet.  We haven't committed anything yet,
   5464 			 * so just unload the DMA map, put the packet
   5465 			 * back on the queue, and punt.  Notify the upper
   5466 			 * layer that there are no more slots left.
   5467 			 */
   5468 			DPRINTF(WM_DEBUG_TX,
   5469 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   5470 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   5471 			    segs_needed, sc->sc_txfree - 1));
   5472 			ifp->if_flags |= IFF_OACTIVE;
   5473 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   5474 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   5475 			break;
   5476 		}
   5477 
   5478 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   5479 
   5480 		DPRINTF(WM_DEBUG_TX,
   5481 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   5482 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   5483 
   5484 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   5485 
   5486 		/*
   5487 		 * Store a pointer to the packet so that we can free it
   5488 		 * later.
   5489 		 *
   5490 		 * Initially, we consider the number of descriptors the
   5491 		 * packet uses to be the number of DMA segments.  This may be
   5492 		 * incremented by 1 if we do checksum offload (a descriptor
   5493 		 * is used to set the checksum context).
   5494 		 */
   5495 		txs->txs_mbuf = m0;
   5496 		txs->txs_firstdesc = sc->sc_txnext;
   5497 		txs->txs_ndesc = segs_needed;
   5498 
   5499 		/* Set up offload parameters for this packet. */
   5500 		uint32_t cmdlen, fields, dcmdlen;
   5501 		if (m0->m_pkthdr.csum_flags &
   5502 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   5503 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   5504 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   5505 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   5506 			    &do_csum) != 0) {
   5507 				/* Error message already displayed. */
   5508 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   5509 				continue;
   5510 			}
   5511 		} else {
   5512 			do_csum = false;
   5513 			cmdlen = 0;
   5514 			fields = 0;
   5515 		}
   5516 
   5517 		/* Sync the DMA map. */
   5518 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   5519 		    BUS_DMASYNC_PREWRITE);
   5520 
   5521 		/* Initialize the first transmit descriptor. */
   5522 		nexttx = sc->sc_txnext;
   5523 		if (!do_csum) {
   5524 			/* setup a legacy descriptor */
   5525 			wm_set_dma_addr(
   5526 			    &sc->sc_txdescs[nexttx].wtx_addr,
   5527 			    dmamap->dm_segs[0].ds_addr);
   5528 			sc->sc_txdescs[nexttx].wtx_cmdlen =
   5529 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   5530 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
   5531 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
   5532 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   5533 			    NULL) {
   5534 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
   5535 				    htole32(WTX_CMD_VLE);
   5536 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
   5537 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   5538 			} else {
   5539 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   5540 			}
   5541 			dcmdlen = 0;
   5542 		} else {
   5543 			/* setup an advanced data descriptor */
   5544 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5545 			    htole64(dmamap->dm_segs[0].ds_addr);
   5546 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   5547 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
   5548 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   5549 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
   5550 			    htole32(fields);
   5551 			DPRINTF(WM_DEBUG_TX,
   5552 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   5553 			    device_xname(sc->sc_dev), nexttx,
   5554 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   5555 			DPRINTF(WM_DEBUG_TX,
   5556 			    ("\t 0x%08x%08x\n", fields,
   5557 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   5558 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   5559 		}
   5560 
   5561 		lasttx = nexttx;
   5562 		nexttx = WM_NEXTTX(sc, nexttx);
   5563 		/*
   5564 		 * Fill in the next descriptors.  The legacy and advanced
   5565 		 * formats are the same here.
   5566 		 */
   5567 		for (seg = 1; seg < dmamap->dm_nsegs;
   5568 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
   5569 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5570 			    htole64(dmamap->dm_segs[seg].ds_addr);
   5571 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
   5572 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   5573 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   5574 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
   5575 			lasttx = nexttx;
   5576 
   5577 			DPRINTF(WM_DEBUG_TX,
   5578 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   5579 			     "len %#04zx\n",
   5580 			    device_xname(sc->sc_dev), nexttx,
   5581 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   5582 			    dmamap->dm_segs[seg].ds_len));
   5583 		}
   5584 
   5585 		KASSERT(lasttx != -1);
   5586 
   5587 		/*
   5588 		 * Set up the command byte on the last descriptor of
   5589 		 * the packet.  If we're in the interrupt delay window,
   5590 		 * delay the interrupt.
   5591 		 */
   5592 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   5593 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   5594 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   5595 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   5596 
   5597 		txs->txs_lastdesc = lasttx;
   5598 
   5599 		DPRINTF(WM_DEBUG_TX,
   5600 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   5601 		    device_xname(sc->sc_dev),
   5602 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   5603 
   5604 		/* Sync the descriptors we're using. */
   5605 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   5606 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5607 
   5608 		/* Give the packet to the chip. */
   5609 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   5610 		sent = true;
   5611 
   5612 		DPRINTF(WM_DEBUG_TX,
   5613 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   5614 
   5615 		DPRINTF(WM_DEBUG_TX,
   5616 		    ("%s: TX: finished transmitting packet, job %d\n",
   5617 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   5618 
   5619 		/* Advance the tx pointer. */
   5620 		sc->sc_txfree -= txs->txs_ndesc;
   5621 		sc->sc_txnext = nexttx;
   5622 
   5623 		sc->sc_txsfree--;
   5624 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   5625 
   5626 		/* Pass the packet to any BPF listeners. */
   5627 		bpf_mtap(ifp, m0);
   5628 	}
   5629 
   5630 	if (m0 != NULL) {
   5631 		ifp->if_flags |= IFF_OACTIVE;
   5632 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5633 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   5634 		m_freem(m0);
   5635 	}
   5636 
   5637 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   5638 		/* No more slots; notify upper layer. */
   5639 		ifp->if_flags |= IFF_OACTIVE;
   5640 	}
   5641 
   5642 	if (sent) {
   5643 		/* Set a watchdog timer in case the chip flakes out. */
   5644 		ifp->if_timer = 5;
   5645 	}
   5646 }
   5647 
   5648 /* Interrupt */
   5649 
   5650 /*
   5651  * wm_txintr:
   5652  *
   5653  *	Helper; handle transmit interrupts.
   5654  */
   5655 static void
   5656 wm_txintr(struct wm_softc *sc)
   5657 {
   5658 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5659 	struct wm_txsoft *txs;
   5660 	uint8_t status;
   5661 	int i;
   5662 
   5663 	if (sc->sc_stopping)
   5664 		return;
   5665 
   5666 	ifp->if_flags &= ~IFF_OACTIVE;
   5667 
   5668 	/*
   5669 	 * Go through the Tx list and free mbufs for those
   5670 	 * frames which have been transmitted.
   5671 	 */
   5672 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
   5673 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
   5674 		txs = &sc->sc_txsoft[i];
   5675 
   5676 		DPRINTF(WM_DEBUG_TX,
   5677 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   5678 
   5679 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
   5680 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5681 
   5682 		status =
   5683 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   5684 		if ((status & WTX_ST_DD) == 0) {
   5685 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
   5686 			    BUS_DMASYNC_PREREAD);
   5687 			break;
   5688 		}
   5689 
   5690 		DPRINTF(WM_DEBUG_TX,
   5691 		    ("%s: TX: job %d done: descs %d..%d\n",
   5692 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   5693 		    txs->txs_lastdesc));
   5694 
   5695 		/*
   5696 		 * XXX We should probably be using the statistics
   5697 		 * XXX registers, but I don't know if they exist
   5698 		 * XXX on chips before the i82544.
   5699 		 */
   5700 
   5701 #ifdef WM_EVENT_COUNTERS
   5702 		if (status & WTX_ST_TU)
   5703 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   5704 #endif /* WM_EVENT_COUNTERS */
   5705 
   5706 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   5707 			ifp->if_oerrors++;
   5708 			if (status & WTX_ST_LC)
   5709 				log(LOG_WARNING, "%s: late collision\n",
   5710 				    device_xname(sc->sc_dev));
   5711 			else if (status & WTX_ST_EC) {
   5712 				ifp->if_collisions += 16;
   5713 				log(LOG_WARNING, "%s: excessive collisions\n",
   5714 				    device_xname(sc->sc_dev));
   5715 			}
   5716 		} else
   5717 			ifp->if_opackets++;
   5718 
   5719 		sc->sc_txfree += txs->txs_ndesc;
   5720 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   5721 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   5722 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5723 		m_freem(txs->txs_mbuf);
   5724 		txs->txs_mbuf = NULL;
   5725 	}
   5726 
   5727 	/* Update the dirty transmit buffer pointer. */
   5728 	sc->sc_txsdirty = i;
   5729 	DPRINTF(WM_DEBUG_TX,
   5730 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   5731 
   5732 	/*
   5733 	 * If there are no more pending transmissions, cancel the watchdog
   5734 	 * timer.
   5735 	 */
   5736 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
   5737 		ifp->if_timer = 0;
   5738 }
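        /*
         * Reclaim example (a sketch): with sc_txsdirty = 60 and jobs 60
         * and 61 complete (DD set on their last descriptors), the loop
         * above frees those two mbufs, returns their descriptors to
         * sc_txfree, and leaves sc_txsdirty at the first job whose DD
         * bit is still clear.
         */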
   5739 
   5740 /*
   5741  * wm_rxintr:
   5742  *
   5743  *	Helper; handle receive interrupts.
   5744  */
   5745 static void
   5746 wm_rxintr(struct wm_softc *sc)
   5747 {
   5748 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5749 	struct wm_rxsoft *rxs;
   5750 	struct mbuf *m;
   5751 	int i, len;
   5752 	uint8_t status, errors;
   5753 	uint16_t vlantag;
   5754 
   5755 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
   5756 		rxs = &sc->sc_rxsoft[i];
   5757 
   5758 		DPRINTF(WM_DEBUG_RX,
   5759 		    ("%s: RX: checking descriptor %d\n",
   5760 		    device_xname(sc->sc_dev), i));
   5761 
   5762 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5763 
   5764 		status = sc->sc_rxdescs[i].wrx_status;
   5765 		errors = sc->sc_rxdescs[i].wrx_errors;
   5766 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
   5767 		vlantag = sc->sc_rxdescs[i].wrx_special;
   5768 
   5769 		if ((status & WRX_ST_DD) == 0) {
   5770 			/* We have processed all of the receive descriptors. */
   5771 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
   5772 			break;
   5773 		}
   5774 
   5775 		if (__predict_false(sc->sc_rxdiscard)) {
   5776 			DPRINTF(WM_DEBUG_RX,
   5777 			    ("%s: RX: discarding contents of descriptor %d\n",
   5778 			    device_xname(sc->sc_dev), i));
   5779 			WM_INIT_RXDESC(sc, i);
   5780 			if (status & WRX_ST_EOP) {
   5781 				/* Reset our state. */
   5782 				DPRINTF(WM_DEBUG_RX,
   5783 				    ("%s: RX: resetting rxdiscard -> 0\n",
   5784 				    device_xname(sc->sc_dev)));
   5785 				sc->sc_rxdiscard = 0;
   5786 			}
   5787 			continue;
   5788 		}
   5789 
   5790 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5791 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   5792 
   5793 		m = rxs->rxs_mbuf;
   5794 
   5795 		/*
   5796 		 * Add a new receive buffer to the ring, unless of
   5797 		 * course the length is zero. Treat the latter as a
   5798 		 * failed mapping.
   5799 		 */
   5800 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
   5801 			/*
   5802 			 * Failed, throw away what we've done so
   5803 			 * far, and discard the rest of the packet.
   5804 			 */
   5805 			ifp->if_ierrors++;
   5806 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5807 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5808 			WM_INIT_RXDESC(sc, i);
   5809 			if ((status & WRX_ST_EOP) == 0)
   5810 				sc->sc_rxdiscard = 1;
   5811 			if (sc->sc_rxhead != NULL)
   5812 				m_freem(sc->sc_rxhead);
   5813 			WM_RXCHAIN_RESET(sc);
   5814 			DPRINTF(WM_DEBUG_RX,
   5815 			    ("%s: RX: Rx buffer allocation failed, "
   5816 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   5817 			    sc->sc_rxdiscard ? " (discard)" : ""));
   5818 			continue;
   5819 		}
   5820 
   5821 		m->m_len = len;
   5822 		sc->sc_rxlen += len;
   5823 		DPRINTF(WM_DEBUG_RX,
   5824 		    ("%s: RX: buffer at %p len %d\n",
   5825 		    device_xname(sc->sc_dev), m->m_data, len));
   5826 
   5827 		/* If this is not the end of the packet, keep looking. */
   5828 		if ((status & WRX_ST_EOP) == 0) {
   5829 			WM_RXCHAIN_LINK(sc, m);
   5830 			DPRINTF(WM_DEBUG_RX,
   5831 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   5832 			    device_xname(sc->sc_dev), sc->sc_rxlen));
   5833 			continue;
   5834 		}
   5835 
   5836 		/*
   5837 		 * Okay, we have the entire packet now.  The chip is
   5838 		 * configured to include the FCS except on I350 and I21[01]
   5839 		 * (not all chips can be configured to strip it), so we
   5840 		 * need to trim it.  We may also need to adjust the length
   5841 		 * of the previous mbuf in the chain if the current mbuf
   5842 		 * is too short.  Due to an erratum, the RCTL_SECRC bit
   5843 		 * in the RCTL register is always set on I350, so the FCS
   5844 		 * is already stripped there and we must not trim it.
   5845 		 */
   5846 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   5847 		    && (sc->sc_type != WM_T_I210)
   5848 		    && (sc->sc_type != WM_T_I211)) {
   5849 			if (m->m_len < ETHER_CRC_LEN) {
   5850 				sc->sc_rxtail->m_len
   5851 				    -= (ETHER_CRC_LEN - m->m_len);
   5852 				m->m_len = 0;
   5853 			} else
   5854 				m->m_len -= ETHER_CRC_LEN;
   5855 			len = sc->sc_rxlen - ETHER_CRC_LEN;
   5856 		} else
   5857 			len = sc->sc_rxlen;
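        		/*
        		 * Trim example (a sketch): a frame whose last
        		 * descriptor delivered only 2 bytes has its 4-byte CRC
        		 * split across mbufs, so the tail mbuf is zeroed and
        		 * the previous mbuf gives up the remaining 2 bytes.
        		 */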
   5858 
   5859 		WM_RXCHAIN_LINK(sc, m);
   5860 
   5861 		*sc->sc_rxtailp = NULL;
   5862 		m = sc->sc_rxhead;
   5863 
   5864 		WM_RXCHAIN_RESET(sc);
   5865 
   5866 		DPRINTF(WM_DEBUG_RX,
   5867 		    ("%s: RX: have entire packet, len -> %d\n",
   5868 		    device_xname(sc->sc_dev), len));
   5869 
   5870 		/* If an error occurred, update stats and drop the packet. */
   5871 		if (errors &
   5872 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   5873 			if (errors & WRX_ER_SE)
   5874 				log(LOG_WARNING, "%s: symbol error\n",
   5875 				    device_xname(sc->sc_dev));
   5876 			else if (errors & WRX_ER_SEQ)
   5877 				log(LOG_WARNING, "%s: receive sequence error\n",
   5878 				    device_xname(sc->sc_dev));
   5879 			else if (errors & WRX_ER_CE)
   5880 				log(LOG_WARNING, "%s: CRC error\n",
   5881 				    device_xname(sc->sc_dev));
   5882 			m_freem(m);
   5883 			continue;
   5884 		}
   5885 
   5886 		/* No errors.  Receive the packet. */
   5887 		m->m_pkthdr.rcvif = ifp;
   5888 		m->m_pkthdr.len = len;
   5889 
   5890 		/*
   5891 		 * If VLANs are enabled, VLAN packets have been unwrapped
   5892 		 * for us.  Associate the tag with the packet.
   5893 		 */
   5894 		/* XXX should check for i350 and i354 */
   5895 		if ((status & WRX_ST_VP) != 0) {
   5896 			VLAN_INPUT_TAG(ifp, m,
   5897 			    le16toh(vlantag),
   5898 			    continue);
   5899 		}
   5900 
   5901 		/* Set up checksum info for this packet. */
   5902 		if ((status & WRX_ST_IXSM) == 0) {
   5903 			if (status & WRX_ST_IPCS) {
   5904 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   5905 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   5906 				if (errors & WRX_ER_IPE)
   5907 					m->m_pkthdr.csum_flags |=
   5908 					    M_CSUM_IPv4_BAD;
   5909 			}
   5910 			if (status & WRX_ST_TCPCS) {
   5911 				/*
   5912 				 * Note: we don't know if this was TCP or UDP,
   5913 				 * so we just set both bits, and expect the
   5914 				 * upper layers to deal.
   5915 				 */
   5916 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   5917 				m->m_pkthdr.csum_flags |=
   5918 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   5919 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   5920 				if (errors & WRX_ER_TCPE)
   5921 					m->m_pkthdr.csum_flags |=
   5922 					    M_CSUM_TCP_UDP_BAD;
   5923 			}
   5924 		}
   5925 
   5926 		ifp->if_ipackets++;
   5927 
   5928 		WM_RX_UNLOCK(sc);
   5929 
   5930 		/* Pass this up to any BPF listeners. */
   5931 		bpf_mtap(ifp, m);
   5932 
   5933 		/* Pass it on. */
   5934 		(*ifp->if_input)(ifp, m);
   5935 
   5936 		WM_RX_LOCK(sc);
   5937 
   5938 		if (sc->sc_stopping)
   5939 			break;
   5940 	}
   5941 
   5942 	/* Update the receive pointer. */
   5943 	sc->sc_rxptr = i;
   5944 
   5945 	DPRINTF(WM_DEBUG_RX,
   5946 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   5947 }
   5948 
   5949 /*
   5950  * wm_linkintr_gmii:
   5951  *
   5952  *	Helper; handle link interrupts for GMII.
   5953  */
   5954 static void
   5955 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   5956 {
   5957 
   5958 	KASSERT(WM_TX_LOCKED(sc));
   5959 
   5960 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   5961 		__func__));
   5962 
   5963 	if (icr & ICR_LSC) {
   5964 		DPRINTF(WM_DEBUG_LINK,
   5965 		    ("%s: LINK: LSC -> mii_pollstat\n",
   5966 			device_xname(sc->sc_dev)));
   5967 		mii_pollstat(&sc->sc_mii);
   5968 		if (sc->sc_type == WM_T_82543) {
   5969 			int miistatus, active;
   5970 
   5971 			/*
   5972 			 * With 82543, we need to force speed and
   5973 			 * duplex on the MAC equal to what the PHY
   5974 			 * speed and duplex configuration is.
   5975 			 */
   5976 			miistatus = sc->sc_mii.mii_media_status;
   5977 
   5978 			if (miistatus & IFM_ACTIVE) {
   5979 				active = sc->sc_mii.mii_media_active;
   5980 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   5981 				switch (IFM_SUBTYPE(active)) {
   5982 				case IFM_10_T:
   5983 					sc->sc_ctrl |= CTRL_SPEED_10;
   5984 					break;
   5985 				case IFM_100_TX:
   5986 					sc->sc_ctrl |= CTRL_SPEED_100;
   5987 					break;
   5988 				case IFM_1000_T:
   5989 					sc->sc_ctrl |= CTRL_SPEED_1000;
   5990 					break;
   5991 				default:
   5992 					/*
   5993 					 * Fiber?
   5994 					 * Should not happen.
   5995 					 */
   5996 					printf("unknown media (%x)\n",
   5997 					    active);
   5998 					break;
   5999 				}
   6000 				if (active & IFM_FDX)
   6001 					sc->sc_ctrl |= CTRL_FD;
   6002 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6003 			}
   6004 		} else if ((sc->sc_type == WM_T_ICH8)
   6005 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   6006 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   6007 		} else if (sc->sc_type == WM_T_PCH) {
   6008 			wm_k1_gig_workaround_hv(sc,
   6009 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   6010 		}
   6011 
   6012 		if ((sc->sc_phytype == WMPHY_82578)
   6013 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   6014 			== IFM_1000_T)) {
   6015 
   6016 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   6017 				delay(200*1000); /* XXX too big */
   6018 
   6019 				/* Link stall fix for link up */
   6020 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   6021 				    HV_MUX_DATA_CTRL,
   6022 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   6023 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   6024 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   6025 				    HV_MUX_DATA_CTRL,
   6026 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   6027 			}
   6028 		}
   6029 	} else if (icr & ICR_RXSEQ) {
   6030 		DPRINTF(WM_DEBUG_LINK,
   6031 		    ("%s: LINK Receive sequence error\n",
   6032 			device_xname(sc->sc_dev)));
   6033 	}
   6034 }
   6035 
   6036 /*
   6037  * wm_linkintr_tbi:
   6038  *
   6039  *	Helper; handle link interrupts for TBI mode.
   6040  */
   6041 static void
   6042 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   6043 {
   6044 	uint32_t status;
   6045 
   6046 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   6047 		__func__));
   6048 
   6049 	status = CSR_READ(sc, WMREG_STATUS);
   6050 	if (icr & ICR_LSC) {
   6051 		if (status & STATUS_LU) {
   6052 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   6053 			    device_xname(sc->sc_dev),
   6054 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   6055 			/*
   6056 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   6057 			 * so we should update sc->sc_ctrl
   6058 			 */
   6059 
   6060 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   6061 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   6062 			sc->sc_fcrtl &= ~FCRTL_XONE;
   6063 			if (status & STATUS_FD)
   6064 				sc->sc_tctl |=
   6065 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6066 			else
   6067 				sc->sc_tctl |=
   6068 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   6069 			if (sc->sc_ctrl & CTRL_TFCE)
   6070 				sc->sc_fcrtl |= FCRTL_XONE;
   6071 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6072 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   6073 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   6074 				      sc->sc_fcrtl);
   6075 			sc->sc_tbi_linkup = 1;
   6076 		} else {
   6077 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   6078 			    device_xname(sc->sc_dev)));
   6079 			sc->sc_tbi_linkup = 0;
   6080 		}
   6081 		/* Update LED */
   6082 		wm_tbi_serdes_set_linkled(sc);
   6083 	} else if (icr & ICR_RXSEQ) {
   6084 		DPRINTF(WM_DEBUG_LINK,
   6085 		    ("%s: LINK: Receive sequence error\n",
   6086 		    device_xname(sc->sc_dev)));
   6087 	}
   6088 }
   6089 
   6090 /*
   6091  * wm_linkintr_serdes:
   6092  *
   6093  *	Helper; handle link interrupts for SERDES mode.
   6094  */
   6095 static void
   6096 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   6097 {
   6098 	struct mii_data *mii = &sc->sc_mii;
   6099 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6100 	uint32_t pcs_adv, pcs_lpab, reg;
   6101 
   6102 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   6103 		__func__));
   6104 
   6105 	if (icr & ICR_LSC) {
   6106 		/* Check PCS */
   6107 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   6108 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   6109 			mii->mii_media_status |= IFM_ACTIVE;
   6110 			sc->sc_tbi_linkup = 1;
   6111 		} else {
   6112 			mii->mii_media_status |= IFM_NONE;
   6113 			sc->sc_tbi_linkup = 0;
   6114 			wm_tbi_serdes_set_linkled(sc);
   6115 			return;
   6116 		}
   6117 		mii->mii_media_active |= IFM_1000_SX;
   6118 		if ((reg & PCS_LSTS_FDX) != 0)
   6119 			mii->mii_media_active |= IFM_FDX;
   6120 		else
   6121 			mii->mii_media_active |= IFM_HDX;
   6122 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   6123 			/* Check flow */
   6124 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   6125 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   6126 				DPRINTF(WM_DEBUG_LINK,
   6127 				    ("XXX LINKOK but not ACOMP\n"));
   6128 				return;
   6129 			}
   6130 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   6131 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   6132 			DPRINTF(WM_DEBUG_LINK,
   6133 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   6134 			if ((pcs_adv & TXCW_SYM_PAUSE)
   6135 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   6136 				mii->mii_media_active |= IFM_FLOW
   6137 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   6138 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   6139 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   6140 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   6141 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   6142 				mii->mii_media_active |= IFM_FLOW
   6143 				    | IFM_ETH_TXPAUSE;
   6144 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   6145 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   6146 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   6147 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   6148 				mii->mii_media_active |= IFM_FLOW
   6149 				    | IFM_ETH_RXPAUSE;
   6150 		}
   6151 		/* Update LED */
   6152 		wm_tbi_serdes_set_linkled(sc);
   6153 	} else {
   6154 		DPRINTF(WM_DEBUG_LINK,
   6155 		    ("%s: LINK: Receive sequence error\n",
   6156 		    device_xname(sc->sc_dev)));
   6157 	}
   6158 }
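        /*
         * The pause resolution above follows the usual 802.3 rules
         * (a summary, not chip documentation):
         *
         *	local SYM/ASM	partner SYM/ASM		result
         *	1/x		1/x			TX and RX pause
         *	0/1		1/1			TX pause only
         *	1/1		0/1			RX pause only
         */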
   6159 
   6160 /*
   6161  * wm_linkintr:
   6162  *
   6163  *	Helper; handle link interrupts.
   6164  */
   6165 static void
   6166 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   6167 {
   6168 
   6169 	if (sc->sc_flags & WM_F_HAS_MII)
   6170 		wm_linkintr_gmii(sc, icr);
   6171 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   6172 	    && (sc->sc_type >= WM_T_82575))
   6173 		wm_linkintr_serdes(sc, icr);
   6174 	else
   6175 		wm_linkintr_tbi(sc, icr);
   6176 }
   6177 
   6178 /*
   6179  * wm_intr:
   6180  *
   6181  *	Interrupt service routine.
   6182  */
   6183 static int
   6184 wm_intr(void *arg)
   6185 {
   6186 	struct wm_softc *sc = arg;
   6187 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6188 	uint32_t icr;
   6189 	int handled = 0;
   6190 
   6191 	while (1 /* CONSTCOND */) {
   6192 		icr = CSR_READ(sc, WMREG_ICR);
   6193 		if ((icr & sc->sc_icr) == 0)
   6194 			break;
   6195 		rnd_add_uint32(&sc->rnd_source, icr);
   6196 
   6197 		WM_RX_LOCK(sc);
   6198 
   6199 		if (sc->sc_stopping) {
   6200 			WM_RX_UNLOCK(sc);
   6201 			break;
   6202 		}
   6203 
   6204 		handled = 1;
   6205 
   6206 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   6207 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   6208 			DPRINTF(WM_DEBUG_RX,
   6209 			    ("%s: RX: got Rx intr 0x%08x\n",
   6210 			    device_xname(sc->sc_dev),
   6211 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   6212 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   6213 		}
   6214 #endif
   6215 		wm_rxintr(sc);
   6216 
   6217 		WM_RX_UNLOCK(sc);
   6218 		WM_TX_LOCK(sc);
   6219 
   6220 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   6221 		if (icr & ICR_TXDW) {
   6222 			DPRINTF(WM_DEBUG_TX,
   6223 			    ("%s: TX: got TXDW interrupt\n",
   6224 			    device_xname(sc->sc_dev)));
   6225 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   6226 		}
   6227 #endif
   6228 		wm_txintr(sc);
   6229 
   6230 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
   6231 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   6232 			wm_linkintr(sc, icr);
   6233 		}
   6234 
   6235 		WM_TX_UNLOCK(sc);
   6236 
   6237 		if (icr & ICR_RXO) {
   6238 #if defined(WM_DEBUG)
   6239 			log(LOG_WARNING, "%s: Receive overrun\n",
   6240 			    device_xname(sc->sc_dev));
   6241 #endif /* defined(WM_DEBUG) */
   6242 		}
   6243 	}
   6244 
   6245 	if (handled) {
   6246 		/* Try to get more packets going. */
   6247 		ifp->if_start(ifp);
   6248 	}
   6249 
   6250 	return handled;
   6251 }
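        /*
         * Note (our understanding of the i8254x interrupt scheme):
         * reading ICR clears the asserted cause bits, so the loop above
         * keeps servicing until a read returns no causes we care about,
         * then exits with the interrupt deasserted.
         */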
   6252 
   6253 /*
   6254  * Media related.
   6255  * GMII, SGMII, TBI (and SERDES)
   6256  */
   6257 
   6258 /* Common */
   6259 
   6260 /*
   6261  * wm_tbi_serdes_set_linkled:
   6262  *
   6263  *	Update the link LED on TBI and SERDES devices.
   6264  */
   6265 static void
   6266 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   6267 {
   6268 
   6269 	if (sc->sc_tbi_linkup)
   6270 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   6271 	else
   6272 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   6273 
   6274 	/* 82540 or newer devices are active low */
   6275 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   6276 
   6277 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6278 }
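        /*
         * Note on the XOR above (a sketch): on 82540 and newer the link
         * LED pin is active low, so e.g. link up first sets CTRL_SWDPIN(0)
         * and the XOR then clears it, driving the pin low to light the
         * LED.
         */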
   6279 
   6280 /* GMII related */
   6281 
   6282 /*
   6283  * wm_gmii_reset:
   6284  *
   6285  *	Reset the PHY.
   6286  */
   6287 static void
   6288 wm_gmii_reset(struct wm_softc *sc)
   6289 {
   6290 	uint32_t reg;
   6291 	int rv;
   6292 
   6293 	/* get phy semaphore */
   6294 	switch (sc->sc_type) {
   6295 	case WM_T_82571:
   6296 	case WM_T_82572:
   6297 	case WM_T_82573:
   6298 	case WM_T_82574:
   6299 	case WM_T_82583:
   6300 		 /* XXX should get sw semaphore, too */
   6301 		rv = wm_get_swsm_semaphore(sc);
   6302 		break;
   6303 	case WM_T_82575:
   6304 	case WM_T_82576:
   6305 	case WM_T_82580:
   6306 	case WM_T_I350:
   6307 	case WM_T_I354:
   6308 	case WM_T_I210:
   6309 	case WM_T_I211:
   6310 	case WM_T_80003:
   6311 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   6312 		break;
   6313 	case WM_T_ICH8:
   6314 	case WM_T_ICH9:
   6315 	case WM_T_ICH10:
   6316 	case WM_T_PCH:
   6317 	case WM_T_PCH2:
   6318 	case WM_T_PCH_LPT:
   6319 		rv = wm_get_swfwhw_semaphore(sc);
   6320 		break;
   6321 	default:
    6322 		/* nothing to do */
   6323 		rv = 0;
   6324 		break;
   6325 	}
   6326 	if (rv != 0) {
   6327 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6328 		    __func__);
   6329 		return;
   6330 	}
   6331 
   6332 	switch (sc->sc_type) {
   6333 	case WM_T_82542_2_0:
   6334 	case WM_T_82542_2_1:
   6335 		/* null */
   6336 		break;
   6337 	case WM_T_82543:
   6338 		/*
    6339 		 * With the 82543, we need to force the MAC's speed and duplex
    6340 		 * to match the PHY's configuration.
   6341 		 * In addition, we need to perform a hardware reset on the PHY
   6342 		 * to take it out of reset.
   6343 		 */
   6344 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   6345 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6346 
   6347 		/* The PHY reset pin is active-low. */
   6348 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6349 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   6350 		    CTRL_EXT_SWDPIN(4));
   6351 		reg |= CTRL_EXT_SWDPIO(4);
   6352 
   6353 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6354 		CSR_WRITE_FLUSH(sc);
   6355 		delay(10*1000);
   6356 
   6357 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   6358 		CSR_WRITE_FLUSH(sc);
   6359 		delay(150);
   6360 #if 0
   6361 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   6362 #endif
   6363 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   6364 		break;
   6365 	case WM_T_82544:	/* reset 10000us */
   6366 	case WM_T_82540:
   6367 	case WM_T_82545:
   6368 	case WM_T_82545_3:
   6369 	case WM_T_82546:
   6370 	case WM_T_82546_3:
   6371 	case WM_T_82541:
   6372 	case WM_T_82541_2:
   6373 	case WM_T_82547:
   6374 	case WM_T_82547_2:
   6375 	case WM_T_82571:	/* reset 100us */
   6376 	case WM_T_82572:
   6377 	case WM_T_82573:
   6378 	case WM_T_82574:
   6379 	case WM_T_82575:
   6380 	case WM_T_82576:
   6381 	case WM_T_82580:
   6382 	case WM_T_I350:
   6383 	case WM_T_I354:
   6384 	case WM_T_I210:
   6385 	case WM_T_I211:
   6386 	case WM_T_82583:
   6387 	case WM_T_80003:
   6388 		/* generic reset */
   6389 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   6390 		CSR_WRITE_FLUSH(sc);
   6391 		delay(20000);
   6392 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6393 		CSR_WRITE_FLUSH(sc);
   6394 		delay(20000);
   6395 
   6396 		if ((sc->sc_type == WM_T_82541)
   6397 		    || (sc->sc_type == WM_T_82541_2)
   6398 		    || (sc->sc_type == WM_T_82547)
   6399 		    || (sc->sc_type == WM_T_82547_2)) {
    6400 			/* workarounds for IGP are done in igp_reset() */
   6401 			/* XXX add code to set LED after phy reset */
   6402 		}
   6403 		break;
   6404 	case WM_T_ICH8:
   6405 	case WM_T_ICH9:
   6406 	case WM_T_ICH10:
   6407 	case WM_T_PCH:
   6408 	case WM_T_PCH2:
   6409 	case WM_T_PCH_LPT:
   6410 		/* generic reset */
   6411 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   6412 		CSR_WRITE_FLUSH(sc);
   6413 		delay(100);
   6414 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6415 		CSR_WRITE_FLUSH(sc);
   6416 		delay(150);
   6417 		break;
   6418 	default:
   6419 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   6420 		    __func__);
   6421 		break;
   6422 	}
   6423 
   6424 	/* release PHY semaphore */
   6425 	switch (sc->sc_type) {
   6426 	case WM_T_82571:
   6427 	case WM_T_82572:
   6428 	case WM_T_82573:
   6429 	case WM_T_82574:
   6430 	case WM_T_82583:
   6431 		 /* XXX should put sw semaphore, too */
   6432 		wm_put_swsm_semaphore(sc);
   6433 		break;
   6434 	case WM_T_82575:
   6435 	case WM_T_82576:
   6436 	case WM_T_82580:
   6437 	case WM_T_I350:
   6438 	case WM_T_I354:
   6439 	case WM_T_I210:
   6440 	case WM_T_I211:
   6441 	case WM_T_80003:
   6442 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   6443 		break;
   6444 	case WM_T_ICH8:
   6445 	case WM_T_ICH9:
   6446 	case WM_T_ICH10:
   6447 	case WM_T_PCH:
   6448 	case WM_T_PCH2:
   6449 	case WM_T_PCH_LPT:
   6450 		wm_put_swfwhw_semaphore(sc);
   6451 		break;
    6452 	default:
    6453 		/* nothing to do */
    6454 		break;
   6456 	}
   6457 
   6458 	/* get_cfg_done */
   6459 	wm_get_cfg_done(sc);
   6460 
   6461 	/* extra setup */
   6462 	switch (sc->sc_type) {
   6463 	case WM_T_82542_2_0:
   6464 	case WM_T_82542_2_1:
   6465 	case WM_T_82543:
   6466 	case WM_T_82544:
   6467 	case WM_T_82540:
   6468 	case WM_T_82545:
   6469 	case WM_T_82545_3:
   6470 	case WM_T_82546:
   6471 	case WM_T_82546_3:
   6472 	case WM_T_82541_2:
   6473 	case WM_T_82547_2:
   6474 	case WM_T_82571:
   6475 	case WM_T_82572:
   6476 	case WM_T_82573:
   6477 	case WM_T_82574:
   6478 	case WM_T_82575:
   6479 	case WM_T_82576:
   6480 	case WM_T_82580:
   6481 	case WM_T_I350:
   6482 	case WM_T_I354:
   6483 	case WM_T_I210:
   6484 	case WM_T_I211:
   6485 	case WM_T_82583:
   6486 	case WM_T_80003:
   6487 		/* null */
   6488 		break;
   6489 	case WM_T_82541:
   6490 	case WM_T_82547:
    6491 		/* XXX Actively configure the LED after PHY reset */
   6492 		break;
   6493 	case WM_T_ICH8:
   6494 	case WM_T_ICH9:
   6495 	case WM_T_ICH10:
   6496 	case WM_T_PCH:
   6497 	case WM_T_PCH2:
   6498 	case WM_T_PCH_LPT:
    6499 		/* Allow time for h/w to get to a quiescent state after reset */
   6500 		delay(10*1000);
   6501 
   6502 		if (sc->sc_type == WM_T_PCH)
   6503 			wm_hv_phy_workaround_ich8lan(sc);
   6504 
   6505 		if (sc->sc_type == WM_T_PCH2)
   6506 			wm_lv_phy_workaround_ich8lan(sc);
   6507 
   6508 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   6509 			/*
   6510 			 * dummy read to clear the phy wakeup bit after lcd
   6511 			 * reset
   6512 			 */
   6513 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   6514 		}
   6515 
   6516 		/*
    6517 		 * XXX Configure the LCD with the extended configuration region
   6518 		 * in NVM
   6519 		 */
   6520 
   6521 		/* Configure the LCD with the OEM bits in NVM */
   6522 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   6523 		    || (sc->sc_type == WM_T_PCH_LPT)) {
   6524 			/*
   6525 			 * Disable LPLU.
   6526 			 * XXX It seems that 82567 has LPLU, too.
   6527 			 */
   6528 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   6529 			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
   6530 			reg |= HV_OEM_BITS_ANEGNOW;
   6531 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   6532 		}
   6533 		break;
   6534 	default:
   6535 		panic("%s: unknown type\n", __func__);
   6536 		break;
   6537 	}
   6538 }
   6539 
   6540 /*
   6541  * wm_get_phy_id_82575:
   6542  *
   6543  * Return PHY ID. Return -1 if it failed.
   6544  */
   6545 static int
   6546 wm_get_phy_id_82575(struct wm_softc *sc)
   6547 {
   6548 	uint32_t reg;
   6549 	int phyid = -1;
   6550 
   6551 	/* XXX */
   6552 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   6553 		return -1;
   6554 
   6555 	if (wm_sgmii_uses_mdio(sc)) {
   6556 		switch (sc->sc_type) {
   6557 		case WM_T_82575:
   6558 		case WM_T_82576:
   6559 			reg = CSR_READ(sc, WMREG_MDIC);
   6560 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   6561 			break;
   6562 		case WM_T_82580:
   6563 		case WM_T_I350:
   6564 		case WM_T_I354:
   6565 		case WM_T_I210:
   6566 		case WM_T_I211:
   6567 			reg = CSR_READ(sc, WMREG_MDICNFG);
   6568 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   6569 			break;
   6570 		default:
   6571 			return -1;
   6572 		}
   6573 	}
   6574 
   6575 	return phyid;
   6576 }
   6577 
   6578 
   6579 /*
   6580  * wm_gmii_mediainit:
   6581  *
   6582  *	Initialize media for use on 1000BASE-T devices.
   6583  */
   6584 static void
   6585 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   6586 {
   6587 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6588 	struct mii_data *mii = &sc->sc_mii;
   6589 	uint32_t reg;
   6590 
   6591 	/* We have GMII. */
   6592 	sc->sc_flags |= WM_F_HAS_MII;
   6593 
   6594 	if (sc->sc_type == WM_T_80003)
   6595 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   6596 	else
   6597 		sc->sc_tipg = TIPG_1000T_DFLT;
   6598 
   6599 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   6600 	if ((sc->sc_type == WM_T_82580)
   6601 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   6602 	    || (sc->sc_type == WM_T_I211)) {
   6603 		reg = CSR_READ(sc, WMREG_PHPM);
   6604 		reg &= ~PHPM_GO_LINK_D;
   6605 		CSR_WRITE(sc, WMREG_PHPM, reg);
   6606 	}
   6607 
   6608 	/*
   6609 	 * Let the chip set speed/duplex on its own based on
   6610 	 * signals from the PHY.
   6611 	 * XXXbouyer - I'm not sure this is right for the 80003,
   6612 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   6613 	 */
   6614 	sc->sc_ctrl |= CTRL_SLU;
   6615 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6616 
   6617 	/* Initialize our media structures and probe the GMII. */
   6618 	mii->mii_ifp = ifp;
   6619 
    6620 	/*
    6621 	 * Determine the PHY access method.
    6622 	 *
    6623 	 *  For SGMII, use the SGMII specific method.
    6624 	 *
    6625 	 *  For some devices, we can determine the PHY access method
    6626 	 * from sc_type.
    6627 	 *
    6628 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    6629 	 * access method from sc_type, so use the PCI product ID for some
    6630 	 * devices.
    6631 	 * For other ICH8 variants, try igp's method first; if the PHY
    6632 	 * can't be detected that way, fall back to bm's method.
    6633 	 */
   6634 	switch (prodid) {
   6635 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   6636 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   6637 		/* 82577 */
   6638 		sc->sc_phytype = WMPHY_82577;
   6639 		break;
   6640 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   6641 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   6642 		/* 82578 */
   6643 		sc->sc_phytype = WMPHY_82578;
   6644 		break;
   6645 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   6646 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   6647 		/* 82579 */
   6648 		sc->sc_phytype = WMPHY_82579;
   6649 		break;
   6650 	case PCI_PRODUCT_INTEL_82801I_BM:
   6651 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   6652 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   6653 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   6654 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   6655 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   6656 		/* 82567 */
   6657 		sc->sc_phytype = WMPHY_BM;
   6658 		mii->mii_readreg = wm_gmii_bm_readreg;
   6659 		mii->mii_writereg = wm_gmii_bm_writereg;
   6660 		break;
   6661 	default:
   6662 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   6663 		    && !wm_sgmii_uses_mdio(sc)){
   6664 			/* SGMII */
   6665 			mii->mii_readreg = wm_sgmii_readreg;
   6666 			mii->mii_writereg = wm_sgmii_writereg;
   6667 		} else if (sc->sc_type >= WM_T_80003) {
   6668 			/* 80003 */
   6669 			mii->mii_readreg = wm_gmii_i80003_readreg;
   6670 			mii->mii_writereg = wm_gmii_i80003_writereg;
   6671 		} else if (sc->sc_type >= WM_T_I210) {
   6672 			/* I210 and I211 */
   6673 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   6674 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   6675 		} else if (sc->sc_type >= WM_T_82580) {
   6676 			/* 82580, I350 and I354 */
   6677 			sc->sc_phytype = WMPHY_82580;
   6678 			mii->mii_readreg = wm_gmii_82580_readreg;
   6679 			mii->mii_writereg = wm_gmii_82580_writereg;
   6680 		} else if (sc->sc_type >= WM_T_82544) {
    6681 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   6682 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6683 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6684 		} else {
   6685 			mii->mii_readreg = wm_gmii_i82543_readreg;
   6686 			mii->mii_writereg = wm_gmii_i82543_writereg;
   6687 		}
   6688 		break;
   6689 	}
   6690 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
   6691 		/* All PCH* use _hv_ */
   6692 		mii->mii_readreg = wm_gmii_hv_readreg;
   6693 		mii->mii_writereg = wm_gmii_hv_writereg;
   6694 	}
   6695 	mii->mii_statchg = wm_gmii_statchg;
   6696 
   6697 	wm_gmii_reset(sc);
   6698 
   6699 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   6700 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   6701 	    wm_gmii_mediastatus);
   6702 
   6703 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   6704 	    || (sc->sc_type == WM_T_82580)
   6705 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6706 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   6707 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   6708 			/* Attach only one port */
   6709 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   6710 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6711 		} else {
   6712 			int i, id;
   6713 			uint32_t ctrl_ext;
   6714 
   6715 			id = wm_get_phy_id_82575(sc);
   6716 			if (id != -1) {
   6717 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   6718 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   6719 			}
   6720 			if ((id == -1)
   6721 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6722 				/* Power on sgmii phy if it is disabled */
   6723 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   6724 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   6725 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   6726 				CSR_WRITE_FLUSH(sc);
   6727 				delay(300*1000); /* XXX too long */
   6728 
    6729 				/* Try PHY addresses 1 through 7 */
   6730 				for (i = 1; i < 8; i++)
   6731 					mii_attach(sc->sc_dev, &sc->sc_mii,
   6732 					    0xffffffff, i, MII_OFFSET_ANY,
   6733 					    MIIF_DOPAUSE);
   6734 
   6735 				/* restore previous sfp cage power state */
   6736 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   6737 			}
   6738 		}
   6739 	} else {
   6740 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6741 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6742 	}
   6743 
   6744 	/*
   6745 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   6746 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   6747 	 */
   6748 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   6749 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6750 		wm_set_mdio_slow_mode_hv(sc);
   6751 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6752 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6753 	}
   6754 
   6755 	/*
   6756 	 * (For ICH8 variants)
   6757 	 * If PHY detection failed, use BM's r/w function and retry.
   6758 	 */
   6759 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   6760 		/* if failed, retry with *_bm_* */
   6761 		mii->mii_readreg = wm_gmii_bm_readreg;
   6762 		mii->mii_writereg = wm_gmii_bm_writereg;
   6763 
   6764 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6765 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6766 	}
   6767 
   6768 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    6769 		/* No PHY was found */
   6770 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   6771 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   6772 		sc->sc_phytype = WMPHY_NONE;
   6773 	} else {
   6774 		/*
   6775 		 * PHY Found!
   6776 		 * Check PHY type.
   6777 		 */
   6778 		uint32_t model;
   6779 		struct mii_softc *child;
   6780 
   6781 		child = LIST_FIRST(&mii->mii_phys);
   6782 		if (device_is_a(child->mii_dev, "igphy")) {
   6783 			struct igphy_softc *isc = (struct igphy_softc *)child;
   6784 
   6785 			model = isc->sc_mii.mii_mpd_model;
   6786 			if (model == MII_MODEL_yyINTEL_I82566)
   6787 				sc->sc_phytype = WMPHY_IGP_3;
   6788 		}
   6789 
   6790 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   6791 	}
   6792 }
   6793 
   6794 /*
   6795  * wm_gmii_mediachange:	[ifmedia interface function]
   6796  *
   6797  *	Set hardware to newly-selected media on a 1000BASE-T device.
   6798  */
   6799 static int
   6800 wm_gmii_mediachange(struct ifnet *ifp)
   6801 {
   6802 	struct wm_softc *sc = ifp->if_softc;
   6803 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6804 	int rc;
   6805 
   6806 	if ((ifp->if_flags & IFF_UP) == 0)
   6807 		return 0;
   6808 
   6809 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   6810 	sc->sc_ctrl |= CTRL_SLU;
   6811 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   6812 	    || (sc->sc_type > WM_T_82543)) {
   6813 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   6814 	} else {
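         		/*
         		 * A fixed media type was selected: turn off auto speed
         		 * detection and force the MAC to the configured speed
         		 * and duplex.
         		 */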
   6815 		sc->sc_ctrl &= ~CTRL_ASDE;
   6816 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   6817 		if (ife->ifm_media & IFM_FDX)
   6818 			sc->sc_ctrl |= CTRL_FD;
   6819 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   6820 		case IFM_10_T:
   6821 			sc->sc_ctrl |= CTRL_SPEED_10;
   6822 			break;
   6823 		case IFM_100_TX:
   6824 			sc->sc_ctrl |= CTRL_SPEED_100;
   6825 			break;
   6826 		case IFM_1000_T:
   6827 			sc->sc_ctrl |= CTRL_SPEED_1000;
   6828 			break;
   6829 		default:
   6830 			panic("wm_gmii_mediachange: bad media 0x%x",
   6831 			    ife->ifm_media);
   6832 		}
   6833 	}
   6834 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6835 	if (sc->sc_type <= WM_T_82543)
   6836 		wm_gmii_reset(sc);
   6837 
   6838 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   6839 		return 0;
   6840 	return rc;
   6841 }
   6842 
   6843 /*
   6844  * wm_gmii_mediastatus:	[ifmedia interface function]
   6845  *
   6846  *	Get the current interface media status on a 1000BASE-T device.
   6847  */
   6848 static void
   6849 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   6850 {
   6851 	struct wm_softc *sc = ifp->if_softc;
   6852 
   6853 	ether_mediastatus(ifp, ifmr);
   6854 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   6855 	    | sc->sc_flowflags;
   6856 }
   6857 
   6858 #define	MDI_IO		CTRL_SWDPIN(2)
   6859 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   6860 #define	MDI_CLK		CTRL_SWDPIN(3)
   6861 
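         /*
          * On the i82543 the MII management interface is bit-banged via
          * software-controllable pins: MDI_IO carries the data and MDI_CLK
          * the clock, with each bit latched by pulsing MDI_CLK high.
          */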
   6862 static void
   6863 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   6864 {
   6865 	uint32_t i, v;
   6866 
   6867 	v = CSR_READ(sc, WMREG_CTRL);
   6868 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6869 	v |= MDI_DIR | CTRL_SWDPIO(3);
   6870 
   6871 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   6872 		if (data & i)
   6873 			v |= MDI_IO;
   6874 		else
   6875 			v &= ~MDI_IO;
   6876 		CSR_WRITE(sc, WMREG_CTRL, v);
   6877 		CSR_WRITE_FLUSH(sc);
   6878 		delay(10);
   6879 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6880 		CSR_WRITE_FLUSH(sc);
   6881 		delay(10);
   6882 		CSR_WRITE(sc, WMREG_CTRL, v);
   6883 		CSR_WRITE_FLUSH(sc);
   6884 		delay(10);
   6885 	}
   6886 }
   6887 
   6888 static uint32_t
   6889 wm_i82543_mii_recvbits(struct wm_softc *sc)
   6890 {
   6891 	uint32_t v, i, data = 0;
   6892 
   6893 	v = CSR_READ(sc, WMREG_CTRL);
   6894 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6895 	v |= CTRL_SWDPIO(3);
   6896 
   6897 	CSR_WRITE(sc, WMREG_CTRL, v);
   6898 	CSR_WRITE_FLUSH(sc);
   6899 	delay(10);
   6900 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6901 	CSR_WRITE_FLUSH(sc);
   6902 	delay(10);
   6903 	CSR_WRITE(sc, WMREG_CTRL, v);
   6904 	CSR_WRITE_FLUSH(sc);
   6905 	delay(10);
   6906 
   6907 	for (i = 0; i < 16; i++) {
   6908 		data <<= 1;
   6909 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6910 		CSR_WRITE_FLUSH(sc);
   6911 		delay(10);
   6912 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   6913 			data |= 1;
   6914 		CSR_WRITE(sc, WMREG_CTRL, v);
   6915 		CSR_WRITE_FLUSH(sc);
   6916 		delay(10);
   6917 	}
   6918 
   6919 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6920 	CSR_WRITE_FLUSH(sc);
   6921 	delay(10);
   6922 	CSR_WRITE(sc, WMREG_CTRL, v);
   6923 	CSR_WRITE_FLUSH(sc);
   6924 	delay(10);
   6925 
   6926 	return data;
   6927 }
   6928 
   6929 #undef MDI_IO
   6930 #undef MDI_DIR
   6931 #undef MDI_CLK
   6932 
   6933 /*
   6934  * wm_gmii_i82543_readreg:	[mii interface function]
   6935  *
   6936  *	Read a PHY register on the GMII (i82543 version).
   6937  */
   6938 static int
   6939 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   6940 {
   6941 	struct wm_softc *sc = device_private(self);
   6942 	int rv;
   6943 
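         	/*
         	 * Send the 32-bit all-ones preamble, then the start, opcode,
         	 * PHY address and register address fields; recvbits() then
         	 * clocks the 16 data bits (plus turnaround) back in.
         	 */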
   6944 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6945 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   6946 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   6947 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   6948 
   6949 	DPRINTF(WM_DEBUG_GMII,
   6950 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   6951 	    device_xname(sc->sc_dev), phy, reg, rv));
   6952 
   6953 	return rv;
   6954 }
   6955 
   6956 /*
   6957  * wm_gmii_i82543_writereg:	[mii interface function]
   6958  *
   6959  *	Write a PHY register on the GMII (i82543 version).
   6960  */
   6961 static void
   6962 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   6963 {
   6964 	struct wm_softc *sc = device_private(self);
   6965 
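         	/*
         	 * For a write, the whole frame (start and opcode bits, PHY
         	 * and register addresses, turnaround bits and the 16 data
         	 * bits) is shifted out as a single 32-bit word.
         	 */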
   6966 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6967 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   6968 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   6969 	    (MII_COMMAND_START << 30), 32);
   6970 }
   6971 
   6972 /*
   6973  * wm_gmii_i82544_readreg:	[mii interface function]
   6974  *
   6975  *	Read a PHY register on the GMII.
   6976  */
   6977 static int
   6978 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   6979 {
   6980 	struct wm_softc *sc = device_private(self);
   6981 	uint32_t mdic = 0;
   6982 	int i, rv;
   6983 
   6984 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   6985 	    MDIC_REGADD(reg));
   6986 
   6987 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6988 		mdic = CSR_READ(sc, WMREG_MDIC);
   6989 		if (mdic & MDIC_READY)
   6990 			break;
   6991 		delay(50);
   6992 	}
   6993 
   6994 	if ((mdic & MDIC_READY) == 0) {
   6995 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   6996 		    device_xname(sc->sc_dev), phy, reg);
   6997 		rv = 0;
   6998 	} else if (mdic & MDIC_E) {
   6999 #if 0 /* This is normal if no PHY is present. */
   7000 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   7001 		    device_xname(sc->sc_dev), phy, reg);
   7002 #endif
   7003 		rv = 0;
   7004 	} else {
   7005 		rv = MDIC_DATA(mdic);
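         		/*
         		 * MDIO lines float high when nothing drives them, so a
         		 * result of all ones usually means no PHY answered;
         		 * treat it as a failed read.
         		 */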
   7006 		if (rv == 0xffff)
   7007 			rv = 0;
   7008 	}
   7009 
   7010 	return rv;
   7011 }
   7012 
   7013 /*
   7014  * wm_gmii_i82544_writereg:	[mii interface function]
   7015  *
   7016  *	Write a PHY register on the GMII.
   7017  */
   7018 static void
   7019 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   7020 {
   7021 	struct wm_softc *sc = device_private(self);
   7022 	uint32_t mdic = 0;
   7023 	int i;
   7024 
   7025 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   7026 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   7027 
   7028 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   7029 		mdic = CSR_READ(sc, WMREG_MDIC);
   7030 		if (mdic & MDIC_READY)
   7031 			break;
   7032 		delay(50);
   7033 	}
   7034 
   7035 	if ((mdic & MDIC_READY) == 0)
   7036 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   7037 		    device_xname(sc->sc_dev), phy, reg);
   7038 	else if (mdic & MDIC_E)
   7039 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   7040 		    device_xname(sc->sc_dev), phy, reg);
   7041 }
   7042 
   7043 /*
   7044  * wm_gmii_i80003_readreg:	[mii interface function]
   7045  *
    7046  *	Read a PHY register on the kumeran.
    7047  * This could be handled by the PHY layer if we didn't have to lock the
    7048  * resource ...
   7049  */
   7050 static int
   7051 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   7052 {
   7053 	struct wm_softc *sc = device_private(self);
   7054 	int sem;
   7055 	int rv;
   7056 
   7057 	if (phy != 1) /* only one PHY on kumeran bus */
   7058 		return 0;
   7059 
   7060 	sem = swfwphysem[sc->sc_funcid];
   7061 	if (wm_get_swfw_semaphore(sc, sem)) {
   7062 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7063 		    __func__);
   7064 		return 0;
   7065 	}
   7066 
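         	/*
         	 * GG82563 register numbers encode the page in the upper bits
         	 * and the offset in the low bits; offsets at or above
         	 * GG82563_MIN_ALT_REG must be selected through the alternate
         	 * page select register.
         	 */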
   7067 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   7068 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   7069 		    reg >> GG82563_PAGE_SHIFT);
   7070 	} else {
   7071 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   7072 		    reg >> GG82563_PAGE_SHIFT);
   7073 	}
    7074 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   7075 	delay(200);
   7076 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   7077 	delay(200);
   7078 
   7079 	wm_put_swfw_semaphore(sc, sem);
   7080 	return rv;
   7081 }
   7082 
   7083 /*
   7084  * wm_gmii_i80003_writereg:	[mii interface function]
   7085  *
   7086  *	Write a PHY register on the kumeran.
   7087  * This could be handled by the PHY layer if we didn't have to lock the
    7088  * resource ...
   7089  */
   7090 static void
   7091 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   7092 {
   7093 	struct wm_softc *sc = device_private(self);
   7094 	int sem;
   7095 
   7096 	if (phy != 1) /* only one PHY on kumeran bus */
   7097 		return;
   7098 
   7099 	sem = swfwphysem[sc->sc_funcid];
   7100 	if (wm_get_swfw_semaphore(sc, sem)) {
   7101 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7102 		    __func__);
   7103 		return;
   7104 	}
   7105 
   7106 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   7107 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   7108 		    reg >> GG82563_PAGE_SHIFT);
   7109 	} else {
   7110 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   7111 		    reg >> GG82563_PAGE_SHIFT);
   7112 	}
    7113 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   7114 	delay(200);
   7115 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   7116 	delay(200);
   7117 
   7118 	wm_put_swfw_semaphore(sc, sem);
   7119 }
   7120 
   7121 /*
   7122  * wm_gmii_bm_readreg:	[mii interface function]
   7123  *
    7124  *	Read a PHY register on the BM PHY (82567).
    7125  * This could be handled by the PHY layer if we didn't have to lock the
    7126  * resource ...
   7127  */
   7128 static int
   7129 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   7130 {
   7131 	struct wm_softc *sc = device_private(self);
   7132 	int sem;
   7133 	int rv;
   7134 
   7135 	sem = swfwphysem[sc->sc_funcid];
   7136 	if (wm_get_swfw_semaphore(sc, sem)) {
   7137 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7138 		    __func__);
   7139 		return 0;
   7140 	}
   7141 
   7142 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   7143 		if (phy == 1)
   7144 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   7145 			    reg);
   7146 		else
   7147 			wm_gmii_i82544_writereg(self, phy,
   7148 			    GG82563_PHY_PAGE_SELECT,
   7149 			    reg >> GG82563_PAGE_SHIFT);
   7150 	}
   7151 
   7152 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   7153 	wm_put_swfw_semaphore(sc, sem);
   7154 	return rv;
   7155 }
   7156 
   7157 /*
   7158  * wm_gmii_bm_writereg:	[mii interface function]
   7159  *
    7160  *	Write a PHY register on the BM PHY (82567).
    7161  * This could be handled by the PHY layer if we didn't have to lock the
    7162  * resource ...
   7163  */
   7164 static void
   7165 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   7166 {
   7167 	struct wm_softc *sc = device_private(self);
   7168 	int sem;
   7169 
   7170 	sem = swfwphysem[sc->sc_funcid];
   7171 	if (wm_get_swfw_semaphore(sc, sem)) {
   7172 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7173 		    __func__);
   7174 		return;
   7175 	}
   7176 
   7177 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   7178 		if (phy == 1)
   7179 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   7180 			    reg);
   7181 		else
   7182 			wm_gmii_i82544_writereg(self, phy,
   7183 			    GG82563_PHY_PAGE_SELECT,
   7184 			    reg >> GG82563_PAGE_SHIFT);
   7185 	}
   7186 
   7187 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   7188 	wm_put_swfw_semaphore(sc, sem);
   7189 }
   7190 
   7191 static void
   7192 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   7193 {
   7194 	struct wm_softc *sc = device_private(self);
   7195 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   7196 	uint16_t wuce;
   7197 
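         	/*
         	 * Wakeup register access sequence: select page 769 and set the
         	 * WUC enable bit, select page 800, write the register number to
         	 * the address opcode, read or write via the data opcode, then
         	 * restore the original page 769 enable bits.
         	 */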
   7198 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   7199 	if (sc->sc_type == WM_T_PCH) {
    7200 		/* XXX The e1000 driver does nothing here... why? */
   7201 	}
   7202 
   7203 	/* Set page 769 */
   7204 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7205 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   7206 
   7207 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   7208 
   7209 	wuce &= ~BM_WUC_HOST_WU_BIT;
   7210 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   7211 	    wuce | BM_WUC_ENABLE_BIT);
   7212 
   7213 	/* Select page 800 */
   7214 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7215 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   7216 
   7217 	/* Write page 800 */
   7218 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   7219 
   7220 	if (rd)
   7221 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   7222 	else
   7223 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   7224 
   7225 	/* Set page 769 */
   7226 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7227 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   7228 
   7229 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   7230 }
   7231 
   7232 /*
   7233  * wm_gmii_hv_readreg:	[mii interface function]
   7234  *
    7235  *	Read a PHY register on the HV family PHYs (PCH and newer).
    7236  * This could be handled by the PHY layer if we didn't have to lock the
    7237  * resource ...
   7238  */
   7239 static int
   7240 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   7241 {
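         	/*
         	 * HV/BM register numbers carry the PHY page in their upper
         	 * bits; BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() unpack them.
         	 */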
   7242 	struct wm_softc *sc = device_private(self);
   7243 	uint16_t page = BM_PHY_REG_PAGE(reg);
   7244 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   7245 	uint16_t val;
   7246 	int rv;
   7247 
   7248 	if (wm_get_swfwhw_semaphore(sc)) {
   7249 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7250 		    __func__);
   7251 		return 0;
   7252 	}
   7253 
   7254 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   7255 	if (sc->sc_phytype == WMPHY_82577) {
   7256 		/* XXX must write */
   7257 	}
   7258 
   7259 	/* Page 800 works differently than the rest so it has its own func */
    7260 	if (page == BM_WUC_PAGE) {
    7261 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
         		wm_put_swfwhw_semaphore(sc);
    7262 		return val;
    7263 	}
   7264 
    7265 	/*
    7266 	 * Pages below 768 work differently from the rest, so they would
    7267 	 * need a function of their own; that case is not handled here.
    7268 	 */
    7269 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    7270 		printf("gmii_hv_readreg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    7271 		return 0;
    7272 	}
   7273 
   7274 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   7275 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7276 		    page << BME1000_PAGE_SHIFT);
   7277 	}
   7278 
   7279 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   7280 	wm_put_swfwhw_semaphore(sc);
   7281 	return rv;
   7282 }
   7283 
   7284 /*
   7285  * wm_gmii_hv_writereg:	[mii interface function]
   7286  *
    7287  *	Write a PHY register on the HV family PHYs (PCH and newer).
    7288  * This could be handled by the PHY layer if we didn't have to lock the
    7289  * resource ...
   7290  */
   7291 static void
   7292 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   7293 {
   7294 	struct wm_softc *sc = device_private(self);
   7295 	uint16_t page = BM_PHY_REG_PAGE(reg);
   7296 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   7297 
   7298 	if (wm_get_swfwhw_semaphore(sc)) {
   7299 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7300 		    __func__);
   7301 		return;
   7302 	}
   7303 
   7304 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   7305 
   7306 	/* Page 800 works differently than the rest so it has its own func */
   7307 	if (page == BM_WUC_PAGE) {
   7308 		uint16_t tmp;
   7309 
   7310 		tmp = val;
    7311 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
         		wm_put_swfwhw_semaphore(sc);
    7312 		return;
   7313 	}
   7314 
    7315 	/*
    7316 	 * Pages below 768 work differently from the rest, so they would
    7317 	 * need a function of their own; that case is not handled here.
    7318 	 */
    7319 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    7320 		printf("gmii_hv_writereg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    7321 		return;
    7322 	}
   7323 
   7324 	/*
   7325 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   7326 	 * Power Down (whenever bit 11 of the PHY control register is set)
   7327 	 */
   7328 
   7329 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   7330 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7331 		    page << BME1000_PAGE_SHIFT);
   7332 	}
   7333 
   7334 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   7335 	wm_put_swfwhw_semaphore(sc);
   7336 }
   7337 
   7338 /*
   7339  * wm_gmii_82580_readreg:	[mii interface function]
   7340  *
   7341  *	Read a PHY register on the 82580 and I350.
   7342  * This could be handled by the PHY layer if we didn't have to lock the
    7343  * resource ...
   7344  */
   7345 static int
   7346 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   7347 {
   7348 	struct wm_softc *sc = device_private(self);
   7349 	int sem;
   7350 	int rv;
   7351 
   7352 	sem = swfwphysem[sc->sc_funcid];
   7353 	if (wm_get_swfw_semaphore(sc, sem)) {
   7354 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7355 		    __func__);
   7356 		return 0;
   7357 	}
   7358 
   7359 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   7360 
   7361 	wm_put_swfw_semaphore(sc, sem);
   7362 	return rv;
   7363 }
   7364 
   7365 /*
   7366  * wm_gmii_82580_writereg:	[mii interface function]
   7367  *
   7368  *	Write a PHY register on the 82580 and I350.
   7369  * This could be handled by the PHY layer if we didn't have to lock the
    7370  * resource ...
   7371  */
   7372 static void
   7373 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   7374 {
   7375 	struct wm_softc *sc = device_private(self);
   7376 	int sem;
   7377 
   7378 	sem = swfwphysem[sc->sc_funcid];
   7379 	if (wm_get_swfw_semaphore(sc, sem)) {
   7380 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7381 		    __func__);
   7382 		return;
   7383 	}
   7384 
   7385 	wm_gmii_i82544_writereg(self, phy, reg, val);
   7386 
   7387 	wm_put_swfw_semaphore(sc, sem);
   7388 }
   7389 
   7390 /*
   7391  * wm_gmii_gs40g_readreg:	[mii interface function]
   7392  *
    7393  *	Read a PHY register on the I210 and I211.
    7394  * This could be handled by the PHY layer if we didn't have to lock the
    7395  * resource ...
   7396  */
   7397 static int
   7398 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   7399 {
   7400 	struct wm_softc *sc = device_private(self);
   7401 	int sem;
   7402 	int page, offset;
   7403 	int rv;
   7404 
   7405 	/* Acquire semaphore */
   7406 	sem = swfwphysem[sc->sc_funcid];
   7407 	if (wm_get_swfw_semaphore(sc, sem)) {
   7408 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7409 		    __func__);
   7410 		return 0;
   7411 	}
   7412 
   7413 	/* Page select */
   7414 	page = reg >> GS40G_PAGE_SHIFT;
   7415 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   7416 
   7417 	/* Read reg */
   7418 	offset = reg & GS40G_OFFSET_MASK;
   7419 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   7420 
   7421 	wm_put_swfw_semaphore(sc, sem);
   7422 	return rv;
   7423 }
   7424 
   7425 /*
   7426  * wm_gmii_gs40g_writereg:	[mii interface function]
   7427  *
   7428  *	Write a PHY register on the I210 and I211.
   7429  * This could be handled by the PHY layer if we didn't have to lock the
    7430  * resource ...
   7431  */
   7432 static void
   7433 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   7434 {
   7435 	struct wm_softc *sc = device_private(self);
   7436 	int sem;
   7437 	int page, offset;
   7438 
   7439 	/* Acquire semaphore */
   7440 	sem = swfwphysem[sc->sc_funcid];
   7441 	if (wm_get_swfw_semaphore(sc, sem)) {
   7442 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7443 		    __func__);
   7444 		return;
   7445 	}
   7446 
   7447 	/* Page select */
   7448 	page = reg >> GS40G_PAGE_SHIFT;
   7449 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   7450 
   7451 	/* Write reg */
   7452 	offset = reg & GS40G_OFFSET_MASK;
   7453 	wm_gmii_i82544_writereg(self, phy, offset, val);
   7454 
   7455 	/* Release semaphore */
   7456 	wm_put_swfw_semaphore(sc, sem);
   7457 }
   7458 
   7459 /*
   7460  * wm_gmii_statchg:	[mii interface function]
   7461  *
   7462  *	Callback from MII layer when media changes.
   7463  */
   7464 static void
   7465 wm_gmii_statchg(struct ifnet *ifp)
   7466 {
   7467 	struct wm_softc *sc = ifp->if_softc;
   7468 	struct mii_data *mii = &sc->sc_mii;
   7469 
   7470 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   7471 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7472 	sc->sc_fcrtl &= ~FCRTL_XONE;
   7473 
   7474 	/*
   7475 	 * Get flow control negotiation result.
   7476 	 */
   7477 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   7478 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   7479 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   7480 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   7481 	}
   7482 
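         	/*
         	 * Apply the negotiated pause settings: TFCE enables sending
         	 * PAUSE frames (with XON), RFCE enables acting on received
         	 * PAUSE frames.
         	 */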
   7483 	if (sc->sc_flowflags & IFM_FLOW) {
   7484 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   7485 			sc->sc_ctrl |= CTRL_TFCE;
   7486 			sc->sc_fcrtl |= FCRTL_XONE;
   7487 		}
   7488 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   7489 			sc->sc_ctrl |= CTRL_RFCE;
   7490 	}
   7491 
   7492 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7493 		DPRINTF(WM_DEBUG_LINK,
   7494 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   7495 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7496 	} else {
   7497 		DPRINTF(WM_DEBUG_LINK,
   7498 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   7499 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7500 	}
   7501 
   7502 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7503 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7504 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   7505 						 : WMREG_FCRTL, sc->sc_fcrtl);
   7506 	if (sc->sc_type == WM_T_80003) {
   7507 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   7508 		case IFM_1000_T:
   7509 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   7510 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   7511 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   7512 			break;
   7513 		default:
   7514 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   7515 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   7516 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   7517 			break;
   7518 		}
   7519 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7520 	}
   7521 }
   7522 
   7523 /*
   7524  * wm_kmrn_readreg:
   7525  *
   7526  *	Read a kumeran register
   7527  */
   7528 static int
   7529 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   7530 {
   7531 	int rv;
   7532 
   7533 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   7534 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   7535 			aprint_error_dev(sc->sc_dev,
   7536 			    "%s: failed to get semaphore\n", __func__);
   7537 			return 0;
   7538 		}
   7539 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   7540 		if (wm_get_swfwhw_semaphore(sc)) {
   7541 			aprint_error_dev(sc->sc_dev,
   7542 			    "%s: failed to get semaphore\n", __func__);
   7543 			return 0;
   7544 		}
   7545 	}
   7546 
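         	/*
         	 * KUMCTRLSTA acts as a mailbox: write the register offset with
         	 * the read-enable bit set, give the hardware a moment to latch
         	 * the data, then read the result back from the same register.
         	 */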
   7547 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7548 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7549 	    KUMCTRLSTA_REN);
   7550 	CSR_WRITE_FLUSH(sc);
   7551 	delay(2);
   7552 
   7553 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   7554 
   7555 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   7556 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   7557 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   7558 		wm_put_swfwhw_semaphore(sc);
   7559 
   7560 	return rv;
   7561 }
   7562 
   7563 /*
   7564  * wm_kmrn_writereg:
   7565  *
   7566  *	Write a kumeran register
   7567  */
   7568 static void
   7569 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   7570 {
   7571 
   7572 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   7573 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   7574 			aprint_error_dev(sc->sc_dev,
   7575 			    "%s: failed to get semaphore\n", __func__);
   7576 			return;
   7577 		}
   7578 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   7579 		if (wm_get_swfwhw_semaphore(sc)) {
   7580 			aprint_error_dev(sc->sc_dev,
   7581 			    "%s: failed to get semaphore\n", __func__);
   7582 			return;
   7583 		}
   7584 	}
   7585 
   7586 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7587 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7588 	    (val & KUMCTRLSTA_MASK));
   7589 
   7590 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   7591 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   7592 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   7593 		wm_put_swfwhw_semaphore(sc);
   7594 }
   7595 
   7596 /* SGMII related */
   7597 
   7598 /*
   7599  * wm_sgmii_uses_mdio
   7600  *
   7601  * Check whether the transaction is to the internal PHY or the external
   7602  * MDIO interface. Return true if it's MDIO.
   7603  */
   7604 static bool
   7605 wm_sgmii_uses_mdio(struct wm_softc *sc)
   7606 {
   7607 	uint32_t reg;
   7608 	bool ismdio = false;
   7609 
   7610 	switch (sc->sc_type) {
   7611 	case WM_T_82575:
   7612 	case WM_T_82576:
   7613 		reg = CSR_READ(sc, WMREG_MDIC);
   7614 		ismdio = ((reg & MDIC_DEST) != 0);
   7615 		break;
   7616 	case WM_T_82580:
   7617 	case WM_T_I350:
   7618 	case WM_T_I354:
   7619 	case WM_T_I210:
   7620 	case WM_T_I211:
   7621 		reg = CSR_READ(sc, WMREG_MDICNFG);
   7622 		ismdio = ((reg & MDICNFG_DEST) != 0);
   7623 		break;
   7624 	default:
   7625 		break;
   7626 	}
   7627 
   7628 	return ismdio;
   7629 }
   7630 
   7631 /*
   7632  * wm_sgmii_readreg:	[mii interface function]
   7633  *
    7634  *	Read a PHY register on the SGMII.
    7635  * This could be handled by the PHY layer if we didn't have to lock the
    7636  * resource ...
   7637  */
   7638 static int
   7639 wm_sgmii_readreg(device_t self, int phy, int reg)
   7640 {
   7641 	struct wm_softc *sc = device_private(self);
   7642 	uint32_t i2ccmd;
   7643 	int i, rv;
   7644 
   7645 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7646 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7647 		    __func__);
   7648 		return 0;
   7649 	}
   7650 
   7651 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7652 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7653 	    | I2CCMD_OPCODE_READ;
   7654 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7655 
   7656 	/* Poll the ready bit */
   7657 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7658 		delay(50);
   7659 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7660 		if (i2ccmd & I2CCMD_READY)
   7661 			break;
   7662 	}
   7663 	if ((i2ccmd & I2CCMD_READY) == 0)
   7664 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   7665 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7666 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7667 
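         	/* The two data bytes come back swapped over I2C; restore them. */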
   7668 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   7669 
   7670 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7671 	return rv;
   7672 }
   7673 
   7674 /*
   7675  * wm_sgmii_writereg:	[mii interface function]
   7676  *
   7677  *	Write a PHY register on the SGMII.
   7678  * This could be handled by the PHY layer if we didn't have to lock the
    7679  * resource ...
   7680  */
   7681 static void
   7682 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   7683 {
   7684 	struct wm_softc *sc = device_private(self);
   7685 	uint32_t i2ccmd;
   7686 	int i;
   7687 	int val_swapped;
   7688 
   7689 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7690 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7691 		    __func__);
   7692 		return;
   7693 	}
   7694 	/* Swap the data bytes for the I2C interface */
   7695 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   7696 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7697 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7698 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   7699 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7700 
   7701 	/* Poll the ready bit */
   7702 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7703 		delay(50);
   7704 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7705 		if (i2ccmd & I2CCMD_READY)
   7706 			break;
   7707 	}
   7708 	if ((i2ccmd & I2CCMD_READY) == 0)
   7709 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   7710 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7711 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7712 
    7713 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7714 }
   7715 
   7716 /* TBI related */
   7717 
   7718 /*
   7719  * wm_tbi_mediainit:
   7720  *
   7721  *	Initialize media for use on 1000BASE-X devices.
   7722  */
   7723 static void
   7724 wm_tbi_mediainit(struct wm_softc *sc)
   7725 {
   7726 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7727 	const char *sep = "";
   7728 
   7729 	if (sc->sc_type < WM_T_82543)
   7730 		sc->sc_tipg = TIPG_WM_DFLT;
   7731 	else
   7732 		sc->sc_tipg = TIPG_LG_DFLT;
   7733 
   7734 	sc->sc_tbi_serdes_anegticks = 5;
   7735 
   7736 	/* Initialize our media structures */
   7737 	sc->sc_mii.mii_ifp = ifp;
   7738 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   7739 
   7740 	if ((sc->sc_type >= WM_T_82575)
   7741 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   7742 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   7743 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   7744 	else
   7745 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   7746 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   7747 
   7748 	/*
   7749 	 * SWD Pins:
   7750 	 *
   7751 	 *	0 = Link LED (output)
   7752 	 *	1 = Loss Of Signal (input)
   7753 	 */
   7754 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   7755 
   7756 	/* XXX Perhaps this is only for TBI */
   7757 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   7758 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   7759 
   7760 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7761 		sc->sc_ctrl &= ~CTRL_LRST;
   7762 
   7763 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7764 
   7765 #define	ADD(ss, mm, dd)							\
   7766 do {									\
   7767 	aprint_normal("%s%s", sep, ss);					\
   7768 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   7769 	sep = ", ";							\
   7770 } while (/*CONSTCOND*/0)
   7771 
   7772 	aprint_normal_dev(sc->sc_dev, "");
   7773 
   7774 	/* Only 82545 is LX */
   7775 	if (sc->sc_type == WM_T_82545) {
   7776 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   7777 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
   7778 	} else {
   7779 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   7780 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   7781 	}
   7782 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   7783 	aprint_normal("\n");
   7784 
   7785 #undef ADD
   7786 
   7787 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   7788 }
   7789 
   7790 /*
   7791  * wm_tbi_mediachange:	[ifmedia interface function]
   7792  *
   7793  *	Set hardware to newly-selected media on a 1000BASE-X device.
   7794  */
   7795 static int
   7796 wm_tbi_mediachange(struct ifnet *ifp)
   7797 {
   7798 	struct wm_softc *sc = ifp->if_softc;
   7799 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7800 	uint32_t status;
   7801 	int i;
   7802 
   7803 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   7804 		/* XXX need some work for >= 82571 and < 82575 */
   7805 		if (sc->sc_type < WM_T_82575)
   7806 			return 0;
   7807 	}
   7808 
   7809 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   7810 	    || (sc->sc_type >= WM_T_82575))
   7811 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   7812 
   7813 	sc->sc_ctrl &= ~CTRL_LRST;
   7814 	sc->sc_txcw = TXCW_ANE;
   7815 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   7816 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   7817 	else if (ife->ifm_media & IFM_FDX)
   7818 		sc->sc_txcw |= TXCW_FD;
   7819 	else
   7820 		sc->sc_txcw |= TXCW_HD;
   7821 
   7822 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   7823 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   7824 
   7825 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   7826 		    device_xname(sc->sc_dev), sc->sc_txcw));
   7827 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7828 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7829 	CSR_WRITE_FLUSH(sc);
   7830 	delay(1000);
   7831 
   7832 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   7833 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   7834 
    7835 	/*
    7836 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
    7837 	 * optics detect a signal; on older chips it is cleared on signal.
    7838 	 */
   7839 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   7840 		/* Have signal; wait for the link to come up. */
   7841 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   7842 			delay(10000);
   7843 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   7844 				break;
   7845 		}
   7846 
   7847 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   7848 			    device_xname(sc->sc_dev),i));
   7849 
   7850 		status = CSR_READ(sc, WMREG_STATUS);
   7851 		DPRINTF(WM_DEBUG_LINK,
   7852 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   7853 			device_xname(sc->sc_dev),status, STATUS_LU));
   7854 		if (status & STATUS_LU) {
   7855 			/* Link is up. */
   7856 			DPRINTF(WM_DEBUG_LINK,
   7857 			    ("%s: LINK: set media -> link up %s\n",
   7858 			    device_xname(sc->sc_dev),
   7859 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7860 
   7861 			/*
   7862 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7863 			 * so we should update sc->sc_ctrl
   7864 			 */
   7865 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7866 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7867 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7868 			if (status & STATUS_FD)
   7869 				sc->sc_tctl |=
   7870 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7871 			else
   7872 				sc->sc_tctl |=
   7873 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7874 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   7875 				sc->sc_fcrtl |= FCRTL_XONE;
   7876 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7877 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7878 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7879 				      sc->sc_fcrtl);
   7880 			sc->sc_tbi_linkup = 1;
   7881 		} else {
   7882 			if (i == WM_LINKUP_TIMEOUT)
   7883 				wm_check_for_link(sc);
   7884 			/* Link is down. */
   7885 			DPRINTF(WM_DEBUG_LINK,
   7886 			    ("%s: LINK: set media -> link down\n",
   7887 			    device_xname(sc->sc_dev)));
   7888 			sc->sc_tbi_linkup = 0;
   7889 		}
   7890 	} else {
   7891 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   7892 		    device_xname(sc->sc_dev)));
   7893 		sc->sc_tbi_linkup = 0;
   7894 	}
   7895 
   7896 	wm_tbi_serdes_set_linkled(sc);
   7897 
   7898 	return 0;
   7899 }
   7900 
   7901 /*
   7902  * wm_tbi_mediastatus:	[ifmedia interface function]
   7903  *
   7904  *	Get the current interface media status on a 1000BASE-X device.
   7905  */
   7906 static void
   7907 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   7908 {
   7909 	struct wm_softc *sc = ifp->if_softc;
   7910 	uint32_t ctrl, status;
   7911 
   7912 	ifmr->ifm_status = IFM_AVALID;
   7913 	ifmr->ifm_active = IFM_ETHER;
   7914 
   7915 	status = CSR_READ(sc, WMREG_STATUS);
   7916 	if ((status & STATUS_LU) == 0) {
   7917 		ifmr->ifm_active |= IFM_NONE;
   7918 		return;
   7919 	}
   7920 
   7921 	ifmr->ifm_status |= IFM_ACTIVE;
   7922 	/* Only 82545 is LX */
   7923 	if (sc->sc_type == WM_T_82545)
   7924 		ifmr->ifm_active |= IFM_1000_LX;
   7925 	else
   7926 		ifmr->ifm_active |= IFM_1000_SX;
   7927 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   7928 		ifmr->ifm_active |= IFM_FDX;
   7929 	else
   7930 		ifmr->ifm_active |= IFM_HDX;
   7931 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7932 	if (ctrl & CTRL_RFCE)
   7933 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   7934 	if (ctrl & CTRL_TFCE)
   7935 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   7936 }
   7937 
   7938 /* XXX TBI only */
   7939 static int
   7940 wm_check_for_link(struct wm_softc *sc)
   7941 {
   7942 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7943 	uint32_t rxcw;
   7944 	uint32_t ctrl;
   7945 	uint32_t status;
   7946 	uint32_t sig;
   7947 
   7948 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   7949 		/* XXX need some work for >= 82571 */
   7950 		if (sc->sc_type >= WM_T_82571) {
   7951 			sc->sc_tbi_linkup = 1;
   7952 			return 0;
   7953 		}
   7954 	}
   7955 
   7956 	rxcw = CSR_READ(sc, WMREG_RXCW);
   7957 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7958 	status = CSR_READ(sc, WMREG_STATUS);
   7959 
   7960 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   7961 
   7962 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   7963 		device_xname(sc->sc_dev), __func__,
   7964 		((ctrl & CTRL_SWDPIN(1)) == sig),
   7965 		((status & STATUS_LU) != 0),
   7966 		((rxcw & RXCW_C) != 0)
   7967 		    ));
   7968 
   7969 	/*
   7970 	 * SWDPIN   LU RXCW
   7971 	 *      0    0    0
   7972 	 *      0    0    1	(should not happen)
   7973 	 *      0    1    0	(should not happen)
   7974 	 *      0    1    1	(should not happen)
   7975 	 *      1    0    0	Disable autonego and force linkup
   7976 	 *      1    0    1	got /C/ but not linkup yet
   7977 	 *      1    1    0	(linkup)
   7978 	 *      1    1    1	If IFM_AUTO, back to autonego
   7979 	 *
   7980 	 */
   7981 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   7982 	    && ((status & STATUS_LU) == 0)
   7983 	    && ((rxcw & RXCW_C) == 0)) {
   7984 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   7985 			__func__));
   7986 		sc->sc_tbi_linkup = 0;
   7987 		/* Disable auto-negotiation in the TXCW register */
   7988 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   7989 
   7990 		/*
   7991 		 * Force link-up and also force full-duplex.
   7992 		 *
    7993 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
    7994 		 * automatically, so we should update sc->sc_ctrl from it.
   7995 		 */
   7996 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   7997 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7998 	} else if (((status & STATUS_LU) != 0)
   7999 	    && ((rxcw & RXCW_C) != 0)
   8000 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   8001 		sc->sc_tbi_linkup = 1;
   8002 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   8003 			__func__));
   8004 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   8005 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   8006 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   8007 	    && ((rxcw & RXCW_C) != 0)) {
   8008 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   8009 	} else {
   8010 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   8011 			status));
   8012 	}
   8013 
   8014 	return 0;
   8015 }
   8016 
   8017 /*
   8018  * wm_tbi_tick:
   8019  *
   8020  *	Check the link on TBI devices.
   8021  *	This function acts as mii_tick().
   8022  */
   8023 static void
   8024 wm_tbi_tick(struct wm_softc *sc)
   8025 {
   8026 	struct mii_data *mii = &sc->sc_mii;
   8027 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   8028 	uint32_t status;
   8029 
   8030 	KASSERT(WM_TX_LOCKED(sc));
   8031 
   8032 	status = CSR_READ(sc, WMREG_STATUS);
   8033 
   8034 	/* XXX is this needed? */
   8035 	(void)CSR_READ(sc, WMREG_RXCW);
   8036 	(void)CSR_READ(sc, WMREG_CTRL);
   8037 
   8038 	/* set link status */
   8039 	if ((status & STATUS_LU) == 0) {
   8040 		DPRINTF(WM_DEBUG_LINK,
   8041 		    ("%s: LINK: checklink -> down\n",
   8042 			device_xname(sc->sc_dev)));
   8043 		sc->sc_tbi_linkup = 0;
   8044 	} else if (sc->sc_tbi_linkup == 0) {
   8045 		DPRINTF(WM_DEBUG_LINK,
   8046 		    ("%s: LINK: checklink -> up %s\n",
   8047 			device_xname(sc->sc_dev),
   8048 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8049 		sc->sc_tbi_linkup = 1;
   8050 		sc->sc_tbi_serdes_ticks = 0;
   8051 	}
   8052 
   8053 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   8054 		goto setled;
   8055 
   8056 	if ((status & STATUS_LU) == 0) {
   8057 		sc->sc_tbi_linkup = 0;
   8058 		/* If the timer expired, retry autonegotiation */
   8059 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8060 		    && (++sc->sc_tbi_serdes_ticks
   8061 			>= sc->sc_tbi_serdes_anegticks)) {
   8062 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   8063 			sc->sc_tbi_serdes_ticks = 0;
   8064 			/*
   8065 			 * Reset the link, and let autonegotiation do
   8066 			 * its thing
   8067 			 */
   8068 			sc->sc_ctrl |= CTRL_LRST;
   8069 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8070 			CSR_WRITE_FLUSH(sc);
   8071 			delay(1000);
   8072 			sc->sc_ctrl &= ~CTRL_LRST;
   8073 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8074 			CSR_WRITE_FLUSH(sc);
   8075 			delay(1000);
   8076 			CSR_WRITE(sc, WMREG_TXCW,
   8077 			    sc->sc_txcw & ~TXCW_ANE);
   8078 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   8079 		}
   8080 	}
   8081 
   8082 setled:
   8083 	wm_tbi_serdes_set_linkled(sc);
   8084 }
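
/*
 * Illustrative usage sketch: like mii_tick(), wm_tbi_tick() is intended
 * to run once a second with the TX lock held.  A hypothetical caller
 * (the real driver dispatches from its periodic tick) might look like:
 *
 *	WM_TX_LOCK(sc);
 *	wm_tbi_tick(sc);
 *	WM_TX_UNLOCK(sc);
 *	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
 */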
   8085 
   8086 /* SERDES related */
   8087 static void
   8088 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   8089 {
   8090 	uint32_t reg;
   8091 
   8092 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   8093 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   8094 		return;
   8095 
   8096 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   8097 	reg |= PCS_CFG_PCS_EN;
   8098 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   8099 
   8100 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8101 	reg &= ~CTRL_EXT_SWDPIN(3);
   8102 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8103 	CSR_WRITE_FLUSH(sc);
   8104 }
   8105 
   8106 static int
   8107 wm_serdes_mediachange(struct ifnet *ifp)
   8108 {
   8109 	struct wm_softc *sc = ifp->if_softc;
   8110 	bool pcs_autoneg = true; /* XXX */
   8111 	uint32_t ctrl_ext, pcs_lctl, reg;
   8112 
   8113 	/* XXX Currently, this function is not called on 8257[12] */
   8114 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   8115 	    || (sc->sc_type >= WM_T_82575))
   8116 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   8117 
   8118 	wm_serdes_power_up_link_82575(sc);
   8119 
   8120 	sc->sc_ctrl |= CTRL_SLU;
   8121 
   8122 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   8123 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   8124 
   8125 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8126 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   8127 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   8128 	case CTRL_EXT_LINK_MODE_SGMII:
   8129 		pcs_autoneg = true;
   8130 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   8131 		break;
   8132 	case CTRL_EXT_LINK_MODE_1000KX:
   8133 		pcs_autoneg = false;
   8134 		/* FALLTHROUGH */
   8135 	default:
    8136 		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   8137 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   8138 				pcs_autoneg = false;
   8139 		}
   8140 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   8141 		    | CTRL_FRCFDX;
   8142 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   8143 	}
   8144 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8145 
   8146 	if (pcs_autoneg) {
   8147 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   8148 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   8149 
   8150 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   8151 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   8152 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   8153 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   8154 	} else
   8155 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   8156 
   8157 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   8158 
   8159 
   8160 	return 0;
   8161 }
   8162 
   8163 static void
   8164 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8165 {
   8166 	struct wm_softc *sc = ifp->if_softc;
   8167 	struct mii_data *mii = &sc->sc_mii;
   8168 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8169 	uint32_t pcs_adv, pcs_lpab, reg;
   8170 
   8171 	ifmr->ifm_status = IFM_AVALID;
   8172 	ifmr->ifm_active = IFM_ETHER;
   8173 
   8174 	/* Check PCS */
   8175 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8176 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   8177 		ifmr->ifm_active |= IFM_NONE;
   8178 		sc->sc_tbi_linkup = 0;
   8179 		goto setled;
   8180 	}
   8181 
   8182 	sc->sc_tbi_linkup = 1;
   8183 	ifmr->ifm_status |= IFM_ACTIVE;
   8184 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   8185 	if ((reg & PCS_LSTS_FDX) != 0)
   8186 		ifmr->ifm_active |= IFM_FDX;
   8187 	else
   8188 		ifmr->ifm_active |= IFM_HDX;
   8189 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   8190 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8191 		/* Check flow */
   8192 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8193 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
    8194 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
    8195 			goto setled;
    8196 		}
    8197 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
    8198 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
    8199 		DPRINTF(WM_DEBUG_LINK, ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   8200 		if ((pcs_adv & TXCW_SYM_PAUSE)
   8201 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8202 			mii->mii_media_active |= IFM_FLOW
   8203 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8204 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8205 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   8206 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   8207 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   8208 			mii->mii_media_active |= IFM_FLOW
   8209 			    | IFM_ETH_TXPAUSE;
   8210 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   8211 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   8212 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8213 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   8214 			mii->mii_media_active |= IFM_FLOW
   8215 			    | IFM_ETH_RXPAUSE;
    8216 		} else {	/* no flow control negotiated */
    8217 		}
   8218 	}
   8219 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8220 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   8221 setled:
   8222 	wm_tbi_serdes_set_linkled(sc);
   8223 }
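
/*
 * For reference: the pause resolution above follows the usual IEEE 802.3
 * Annex 28B rules.  Writing SYM for TXCW_SYM_PAUSE and ASYM for
 * TXCW_ASYM_PAUSE:
 *
 *	local		link partner	result
 *	SYM		SYM		TX and RX pause
 *	!SYM && ASYM	SYM && ASYM	TX pause only
 *	SYM && ASYM	!SYM && ASYM	RX pause only
 *	anything else	-		no pause
 */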
   8224 
   8225 /*
   8226  * wm_serdes_tick:
   8227  *
   8228  *	Check the link on serdes devices.
   8229  */
   8230 static void
   8231 wm_serdes_tick(struct wm_softc *sc)
   8232 {
   8233 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8234 	struct mii_data *mii = &sc->sc_mii;
   8235 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   8236 	uint32_t reg;
   8237 
   8238 	KASSERT(WM_TX_LOCKED(sc));
   8239 
   8240 	mii->mii_media_status = IFM_AVALID;
   8241 	mii->mii_media_active = IFM_ETHER;
   8242 
   8243 	/* Check PCS */
   8244 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8245 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   8246 		mii->mii_media_status |= IFM_ACTIVE;
   8247 		sc->sc_tbi_linkup = 1;
   8248 		sc->sc_tbi_serdes_ticks = 0;
   8249 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   8250 		if ((reg & PCS_LSTS_FDX) != 0)
   8251 			mii->mii_media_active |= IFM_FDX;
   8252 		else
   8253 			mii->mii_media_active |= IFM_HDX;
   8254 	} else {
   8255 		mii->mii_media_status |= IFM_NONE;
   8256 		sc->sc_tbi_linkup = 0;
    8257 		/* If the timer expired, retry autonegotiation */
   8258 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8259 		    && (++sc->sc_tbi_serdes_ticks
   8260 			>= sc->sc_tbi_serdes_anegticks)) {
   8261 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   8262 			sc->sc_tbi_serdes_ticks = 0;
   8263 			/* XXX */
   8264 			wm_serdes_mediachange(ifp);
   8265 		}
   8266 	}
   8267 
   8268 	wm_tbi_serdes_set_linkled(sc);
   8269 }
   8270 
   8271 /* SFP related */
   8272 
   8273 static int
   8274 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   8275 {
   8276 	uint32_t i2ccmd;
   8277 	int i;
   8278 
   8279 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   8280 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8281 
   8282 	/* Poll the ready bit */
   8283 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8284 		delay(50);
   8285 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8286 		if (i2ccmd & I2CCMD_READY)
   8287 			break;
   8288 	}
   8289 	if ((i2ccmd & I2CCMD_READY) == 0)
   8290 		return -1;
   8291 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   8292 		return -1;
   8293 
   8294 	*data = i2ccmd & 0x00ff;
   8295 
   8296 	return 0;
   8297 }
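
/*
 * Illustrative usage sketch: with the I2C interface enabled in CTRL_EXT
 * (as wm_sfp_get_media_type() below arranges), the SFP identifier byte
 * can be fetched like this:
 *
 *	uint8_t id;
 *	if (wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &id) == 0)
 *		(id == SFF_SFP_ID_SFP means an SFP module is present)
 */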
   8298 
   8299 static uint32_t
   8300 wm_sfp_get_media_type(struct wm_softc *sc)
   8301 {
   8302 	uint32_t ctrl_ext;
   8303 	uint8_t val = 0;
   8304 	int timeout = 3;
   8305 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   8306 	int rv = -1;
   8307 
   8308 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8309 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   8310 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   8311 	CSR_WRITE_FLUSH(sc);
   8312 
   8313 	/* Read SFP module data */
   8314 	while (timeout) {
   8315 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   8316 		if (rv == 0)
   8317 			break;
   8318 		delay(100*1000); /* XXX too big */
   8319 		timeout--;
   8320 	}
   8321 	if (rv != 0)
   8322 		goto out;
   8323 	switch (val) {
   8324 	case SFF_SFP_ID_SFF:
   8325 		aprint_normal_dev(sc->sc_dev,
   8326 		    "Module/Connector soldered to board\n");
   8327 		break;
   8328 	case SFF_SFP_ID_SFP:
   8329 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   8330 		break;
   8331 	case SFF_SFP_ID_UNKNOWN:
   8332 		goto out;
   8333 	default:
   8334 		break;
   8335 	}
   8336 
   8337 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   8338 	if (rv != 0) {
   8339 		goto out;
   8340 	}
   8341 
   8342 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   8343 		mediatype = WM_MEDIATYPE_SERDES;
    8344 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
    8345 		sc->sc_flags |= WM_F_SGMII;
    8346 		mediatype = WM_MEDIATYPE_COPPER;
    8347 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   8348 		sc->sc_flags |= WM_F_SGMII;
   8349 		mediatype = WM_MEDIATYPE_SERDES;
   8350 	}
   8351 
   8352 out:
   8353 	/* Restore I2C interface setting */
   8354 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8355 
   8356 	return mediatype;
   8357 }
   8358 /*
   8359  * NVM related.
    8360  * Microwire, SPI (with or without EERD) and Flash.
   8361  */
   8362 
   8363 /* Both spi and uwire */
   8364 
   8365 /*
   8366  * wm_eeprom_sendbits:
   8367  *
   8368  *	Send a series of bits to the EEPROM.
   8369  */
   8370 static void
   8371 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   8372 {
   8373 	uint32_t reg;
   8374 	int x;
   8375 
   8376 	reg = CSR_READ(sc, WMREG_EECD);
   8377 
   8378 	for (x = nbits; x > 0; x--) {
   8379 		if (bits & (1U << (x - 1)))
   8380 			reg |= EECD_DI;
   8381 		else
   8382 			reg &= ~EECD_DI;
   8383 		CSR_WRITE(sc, WMREG_EECD, reg);
   8384 		CSR_WRITE_FLUSH(sc);
   8385 		delay(2);
   8386 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   8387 		CSR_WRITE_FLUSH(sc);
   8388 		delay(2);
   8389 		CSR_WRITE(sc, WMREG_EECD, reg);
   8390 		CSR_WRITE_FLUSH(sc);
   8391 		delay(2);
   8392 	}
   8393 }
   8394 
   8395 /*
   8396  * wm_eeprom_recvbits:
   8397  *
   8398  *	Receive a series of bits from the EEPROM.
   8399  */
   8400 static void
   8401 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   8402 {
   8403 	uint32_t reg, val;
   8404 	int x;
   8405 
   8406 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   8407 
   8408 	val = 0;
   8409 	for (x = nbits; x > 0; x--) {
   8410 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   8411 		CSR_WRITE_FLUSH(sc);
   8412 		delay(2);
   8413 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   8414 			val |= (1U << (x - 1));
   8415 		CSR_WRITE(sc, WMREG_EECD, reg);
   8416 		CSR_WRITE_FLUSH(sc);
   8417 		delay(2);
   8418 	}
   8419 	*valp = val;
   8420 }
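
/*
 * Illustrative example: both helpers shift MSB first.  Sending the
 * three-bit Microwire READ opcode is simply
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
 *
 * which raises DI for each 1 bit and pulses SK once per bit;
 * wm_eeprom_recvbits() then samples DO on its own SK pulses.
 */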
   8421 
   8422 /* Microwire */
   8423 
   8424 /*
   8425  * wm_nvm_read_uwire:
   8426  *
   8427  *	Read a word from the EEPROM using the MicroWire protocol.
   8428  */
   8429 static int
   8430 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   8431 {
   8432 	uint32_t reg, val;
   8433 	int i;
   8434 
   8435 	for (i = 0; i < wordcnt; i++) {
   8436 		/* Clear SK and DI. */
   8437 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   8438 		CSR_WRITE(sc, WMREG_EECD, reg);
   8439 
   8440 		/*
   8441 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   8442 		 * and Xen.
   8443 		 *
   8444 		 * We use this workaround only for 82540 because qemu's
   8445 		 * e1000 act as 82540.
   8446 		 */
   8447 		if (sc->sc_type == WM_T_82540) {
   8448 			reg |= EECD_SK;
   8449 			CSR_WRITE(sc, WMREG_EECD, reg);
   8450 			reg &= ~EECD_SK;
   8451 			CSR_WRITE(sc, WMREG_EECD, reg);
   8452 			CSR_WRITE_FLUSH(sc);
   8453 			delay(2);
   8454 		}
   8455 		/* XXX: end of workaround */
   8456 
   8457 		/* Set CHIP SELECT. */
   8458 		reg |= EECD_CS;
   8459 		CSR_WRITE(sc, WMREG_EECD, reg);
   8460 		CSR_WRITE_FLUSH(sc);
   8461 		delay(2);
   8462 
   8463 		/* Shift in the READ command. */
   8464 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   8465 
   8466 		/* Shift in address. */
   8467 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   8468 
   8469 		/* Shift out the data. */
   8470 		wm_eeprom_recvbits(sc, &val, 16);
   8471 		data[i] = val & 0xffff;
   8472 
   8473 		/* Clear CHIP SELECT. */
   8474 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   8475 		CSR_WRITE(sc, WMREG_EECD, reg);
   8476 		CSR_WRITE_FLUSH(sc);
   8477 		delay(2);
   8478 	}
   8479 
   8480 	return 0;
   8481 }
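
/*
 * Worked example (illustrative): on a part with sc_nvm_addrbits == 6,
 * reading word 0x12 shifts out the READ opcode followed by the address
 *
 *	1 1 0			UWIRE_OPC_READ (assuming the usual
 *				Microwire READ opcode of 110b)
 *	0 1 0 0 1 0		the 6-bit address 0x12
 *
 * and then clocks in the 16 data bits, MSB first.
 */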
   8482 
   8483 /* SPI */
   8484 
   8485 /*
   8486  * Set SPI and FLASH related information from the EECD register.
   8487  * For 82541 and 82547, the word size is taken from EEPROM.
   8488  */
   8489 static int
   8490 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   8491 {
   8492 	int size;
   8493 	uint32_t reg;
   8494 	uint16_t data;
   8495 
   8496 	reg = CSR_READ(sc, WMREG_EECD);
   8497 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   8498 
   8499 	/* Read the size of NVM from EECD by default */
   8500 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   8501 	switch (sc->sc_type) {
   8502 	case WM_T_82541:
   8503 	case WM_T_82541_2:
   8504 	case WM_T_82547:
   8505 	case WM_T_82547_2:
   8506 		/* Set dummy value to access EEPROM */
   8507 		sc->sc_nvm_wordsize = 64;
   8508 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   8509 		reg = data;
   8510 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   8511 		if (size == 0)
   8512 			size = 6; /* 64 word size */
   8513 		else
   8514 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   8515 		break;
   8516 	case WM_T_80003:
   8517 	case WM_T_82571:
   8518 	case WM_T_82572:
   8519 	case WM_T_82573: /* SPI case */
   8520 	case WM_T_82574: /* SPI case */
   8521 	case WM_T_82583: /* SPI case */
   8522 		size += NVM_WORD_SIZE_BASE_SHIFT;
   8523 		if (size > 14)
   8524 			size = 14;
   8525 		break;
   8526 	case WM_T_82575:
   8527 	case WM_T_82576:
   8528 	case WM_T_82580:
   8529 	case WM_T_I350:
   8530 	case WM_T_I354:
   8531 	case WM_T_I210:
   8532 	case WM_T_I211:
   8533 		size += NVM_WORD_SIZE_BASE_SHIFT;
   8534 		if (size > 15)
   8535 			size = 15;
   8536 		break;
   8537 	default:
   8538 		aprint_error_dev(sc->sc_dev,
   8539 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   8540 		return -1;
   8541 		break;
   8542 	}
   8543 
   8544 	sc->sc_nvm_wordsize = 1 << size;
   8545 
   8546 	return 0;
   8547 }
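
/*
 * Worked example (illustrative): if the EECD size field reads back as 1
 * on an 82571, the code above computes size = 1 + NVM_WORD_SIZE_BASE_SHIFT;
 * assuming the usual base shift of 6, sc_nvm_wordsize becomes
 * 1 << 7 = 128 sixteen-bit words.
 */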
   8548 
   8549 /*
   8550  * wm_nvm_ready_spi:
   8551  *
   8552  *	Wait for a SPI EEPROM to be ready for commands.
   8553  */
   8554 static int
   8555 wm_nvm_ready_spi(struct wm_softc *sc)
   8556 {
   8557 	uint32_t val;
   8558 	int usec;
   8559 
   8560 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   8561 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   8562 		wm_eeprom_recvbits(sc, &val, 8);
   8563 		if ((val & SPI_SR_RDY) == 0)
   8564 			break;
   8565 	}
   8566 	if (usec >= SPI_MAX_RETRIES) {
   8567 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   8568 		return 1;
   8569 	}
   8570 	return 0;
   8571 }
   8572 
   8573 /*
   8574  * wm_nvm_read_spi:
   8575  *
    8576  *	Read a word from the EEPROM using the SPI protocol.
   8577  */
   8578 static int
   8579 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   8580 {
   8581 	uint32_t reg, val;
   8582 	int i;
   8583 	uint8_t opc;
   8584 
   8585 	/* Clear SK and CS. */
   8586 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   8587 	CSR_WRITE(sc, WMREG_EECD, reg);
   8588 	CSR_WRITE_FLUSH(sc);
   8589 	delay(2);
   8590 
   8591 	if (wm_nvm_ready_spi(sc))
   8592 		return 1;
   8593 
   8594 	/* Toggle CS to flush commands. */
   8595 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   8596 	CSR_WRITE_FLUSH(sc);
   8597 	delay(2);
   8598 	CSR_WRITE(sc, WMREG_EECD, reg);
   8599 	CSR_WRITE_FLUSH(sc);
   8600 	delay(2);
   8601 
   8602 	opc = SPI_OPC_READ;
   8603 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   8604 		opc |= SPI_OPC_A8;
   8605 
   8606 	wm_eeprom_sendbits(sc, opc, 8);
   8607 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   8608 
   8609 	for (i = 0; i < wordcnt; i++) {
   8610 		wm_eeprom_recvbits(sc, &val, 16);
   8611 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   8612 	}
   8613 
   8614 	/* Raise CS and clear SK. */
   8615 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   8616 	CSR_WRITE(sc, WMREG_EECD, reg);
   8617 	CSR_WRITE_FLUSH(sc);
   8618 	delay(2);
   8619 
   8620 	return 0;
   8621 }
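
/*
 * Worked example (illustrative): with 8-bit SPI addressing, word 0x90
 * sits at byte address 0x90 << 1 = 0x120, which does not fit in eight
 * bits.  The SPI_OPC_A8 bit folded into the opcode above carries that
 * ninth address bit, so the device sees SPI_OPC_READ | SPI_OPC_A8
 * followed by the low address byte 0x20.
 */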
   8622 
   8623 /* Using with EERD */
   8624 
   8625 static int
   8626 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   8627 {
   8628 	uint32_t attempts = 100000;
   8629 	uint32_t i, reg = 0;
   8630 	int32_t done = -1;
   8631 
   8632 	for (i = 0; i < attempts; i++) {
   8633 		reg = CSR_READ(sc, rw);
   8634 
   8635 		if (reg & EERD_DONE) {
   8636 			done = 0;
   8637 			break;
   8638 		}
   8639 		delay(5);
   8640 	}
   8641 
   8642 	return done;
   8643 }
   8644 
   8645 static int
   8646 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   8647     uint16_t *data)
   8648 {
   8649 	int i, eerd = 0;
   8650 	int error = 0;
   8651 
   8652 	for (i = 0; i < wordcnt; i++) {
   8653 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   8654 
   8655 		CSR_WRITE(sc, WMREG_EERD, eerd);
   8656 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   8657 		if (error != 0)
   8658 			break;
   8659 
   8660 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   8661 	}
   8662 
   8663 	return error;
   8664 }
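
/*
 * Illustrative usage sketch: devices flagged WM_F_EEPROM_EERDEEWR reach
 * this path through wm_nvm_read(); a direct two-word read might look
 * like (the offset chosen here is purely for illustration):
 *
 *	uint16_t cfg[2];
 *	if (wm_nvm_read_eerd(sc, NVM_OFF_CFG2, 2, cfg) != 0)
 *		(handle the polling timeout from wm_poll_eerd_eewr_done())
 */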
   8665 
   8666 /* Flash */
   8667 
   8668 static int
   8669 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   8670 {
   8671 	uint32_t eecd;
   8672 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   8673 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   8674 	uint8_t sig_byte = 0;
   8675 
   8676 	switch (sc->sc_type) {
   8677 	case WM_T_ICH8:
   8678 	case WM_T_ICH9:
   8679 		eecd = CSR_READ(sc, WMREG_EECD);
   8680 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   8681 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   8682 			return 0;
   8683 		}
   8684 		/* FALLTHROUGH */
   8685 	default:
   8686 		/* Default to 0 */
   8687 		*bank = 0;
   8688 
   8689 		/* Check bank 0 */
   8690 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   8691 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   8692 			*bank = 0;
   8693 			return 0;
   8694 		}
   8695 
   8696 		/* Check bank 1 */
   8697 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   8698 		    &sig_byte);
   8699 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   8700 			*bank = 1;
   8701 			return 0;
   8702 		}
   8703 	}
   8704 
   8705 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   8706 		device_xname(sc->sc_dev)));
   8707 	return -1;
   8708 }
   8709 
   8710 /******************************************************************************
   8711  * This function does initial flash setup so that a new read/write/erase cycle
   8712  * can be started.
   8713  *
   8714  * sc - The pointer to the hw structure
   8715  ****************************************************************************/
   8716 static int32_t
   8717 wm_ich8_cycle_init(struct wm_softc *sc)
   8718 {
   8719 	uint16_t hsfsts;
   8720 	int32_t error = 1;
   8721 	int32_t i     = 0;
   8722 
   8723 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8724 
    8725 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   8726 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   8727 		return error;
   8728 	}
   8729 
    8730 	/* Clear FCERR in HW status by writing a 1 */
    8731 	/* Clear DAEL in HW status by writing a 1 */
   8732 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   8733 
   8734 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8735 
    8736 	/*
    8737 	 * Either we should have a hardware SPI cycle-in-progress bit to
    8738 	 * check against in order to start a new cycle, or the FDONE bit
    8739 	 * should be changed in the hardware so that it reads as 1 after a
    8740 	 * hardware reset, which could then be used to tell whether a cycle
    8741 	 * is in progress or has completed.  We should also have a software
    8742 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    8743 	 * so that accesses to those bits by two threads are serialized,
    8744 	 * or some way to keep two threads from starting a cycle at once.
    8745 	 */
   8746 
   8747 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   8748 		/*
   8749 		 * There is no cycle running at present, so we can start a
   8750 		 * cycle
   8751 		 */
   8752 
   8753 		/* Begin by setting Flash Cycle Done. */
   8754 		hsfsts |= HSFSTS_DONE;
   8755 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8756 		error = 0;
   8757 	} else {
   8758 		/*
   8759 		 * otherwise poll for sometime so the current cycle has a
   8760 		 * chance to end before giving up.
   8761 		 */
   8762 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   8763 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8764 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   8765 				error = 0;
   8766 				break;
   8767 			}
   8768 			delay(1);
   8769 		}
   8770 		if (error == 0) {
   8771 			/*
   8772 			 * Successful in waiting for previous cycle to timeout,
   8773 			 * now set the Flash Cycle Done.
   8774 			 */
   8775 			hsfsts |= HSFSTS_DONE;
   8776 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8777 		}
   8778 	}
   8779 	return error;
   8780 }
   8781 
   8782 /******************************************************************************
   8783  * This function starts a flash cycle and waits for its completion
   8784  *
   8785  * sc - The pointer to the hw structure
   8786  ****************************************************************************/
   8787 static int32_t
   8788 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   8789 {
   8790 	uint16_t hsflctl;
   8791 	uint16_t hsfsts;
   8792 	int32_t error = 1;
   8793 	uint32_t i = 0;
   8794 
   8795 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   8796 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   8797 	hsflctl |= HSFCTL_GO;
   8798 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   8799 
   8800 	/* Wait till FDONE bit is set to 1 */
   8801 	do {
   8802 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8803 		if (hsfsts & HSFSTS_DONE)
   8804 			break;
   8805 		delay(1);
   8806 		i++;
   8807 	} while (i < timeout);
    8808 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   8809 		error = 0;
   8810 
   8811 	return error;
   8812 }
   8813 
   8814 /******************************************************************************
   8815  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   8816  *
   8817  * sc - The pointer to the hw structure
   8818  * index - The index of the byte or word to read.
   8819  * size - Size of data to read, 1=byte 2=word
   8820  * data - Pointer to the word to store the value read.
   8821  *****************************************************************************/
   8822 static int32_t
   8823 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   8824     uint32_t size, uint16_t *data)
   8825 {
   8826 	uint16_t hsfsts;
   8827 	uint16_t hsflctl;
   8828 	uint32_t flash_linear_address;
   8829 	uint32_t flash_data = 0;
   8830 	int32_t error = 1;
   8831 	int32_t count = 0;
   8832 
    8833 	if (size < 1 || size > 2 || data == NULL ||
   8834 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   8835 		return error;
   8836 
   8837 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   8838 	    sc->sc_ich8_flash_base;
   8839 
   8840 	do {
   8841 		delay(1);
   8842 		/* Steps */
   8843 		error = wm_ich8_cycle_init(sc);
   8844 		if (error)
   8845 			break;
   8846 
   8847 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   8848 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   8849 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   8850 		    & HSFCTL_BCOUNT_MASK;
   8851 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   8852 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   8853 
   8854 		/*
   8855 		 * Write the last 24 bits of index into Flash Linear address
   8856 		 * field in Flash Address
   8857 		 */
   8858 		/* TODO: TBD maybe check the index against the size of flash */
   8859 
   8860 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   8861 
   8862 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   8863 
    8864 		/*
    8865 		 * If FCERR is set, clear it and retry the whole sequence
    8866 		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read
    8867 		 * the data out of the Flash Data0 register, least
    8868 		 * significant byte first.
    8869 		 */
   8870 		if (error == 0) {
   8871 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   8872 			if (size == 1)
   8873 				*data = (uint8_t)(flash_data & 0x000000FF);
   8874 			else if (size == 2)
   8875 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   8876 			break;
   8877 		} else {
   8878 			/*
   8879 			 * If we've gotten here, then things are probably
   8880 			 * completely hosed, but if the error condition is
   8881 			 * detected, it won't hurt to give it another try...
   8882 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   8883 			 */
   8884 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8885 			if (hsfsts & HSFSTS_ERR) {
   8886 				/* Repeat for some time before giving up. */
   8887 				continue;
   8888 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   8889 				break;
   8890 		}
   8891 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   8892 
   8893 	return error;
   8894 }
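
/*
 * Illustrative summary: a complete flash read is wm_ich8_cycle_init(),
 * program HSFCTL and FADDR, wm_ich8_flash_cycle(), then read FDATA0.
 * The wrappers below package that up, e.g. (act_offset hypothetical):
 *
 *	uint16_t word;
 *	if (wm_read_ich8_word(sc, act_offset, &word) == 0)
 *		(word now holds the 16 bits at byte offset act_offset)
 */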
   8895 
   8896 /******************************************************************************
   8897  * Reads a single byte from the NVM using the ICH8 flash access registers.
   8898  *
   8899  * sc - pointer to wm_hw structure
   8900  * index - The index of the byte to read.
   8901  * data - Pointer to a byte to store the value read.
   8902  *****************************************************************************/
   8903 static int32_t
   8904 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   8905 {
   8906 	int32_t status;
   8907 	uint16_t word = 0;
   8908 
   8909 	status = wm_read_ich8_data(sc, index, 1, &word);
   8910 	if (status == 0)
   8911 		*data = (uint8_t)word;
   8912 	else
   8913 		*data = 0;
   8914 
   8915 	return status;
   8916 }
   8917 
   8918 /******************************************************************************
   8919  * Reads a word from the NVM using the ICH8 flash access registers.
   8920  *
   8921  * sc - pointer to wm_hw structure
   8922  * index - The starting byte index of the word to read.
   8923  * data - Pointer to a word to store the value read.
   8924  *****************************************************************************/
   8925 static int32_t
   8926 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   8927 {
   8928 	int32_t status;
   8929 
   8930 	status = wm_read_ich8_data(sc, index, 2, data);
   8931 	return status;
   8932 }
   8933 
   8934 /******************************************************************************
   8935  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   8936  * register.
   8937  *
   8938  * sc - Struct containing variables accessed by shared code
   8939  * offset - offset of word in the EEPROM to read
   8940  * data - word read from the EEPROM
   8941  * words - number of words to read
   8942  *****************************************************************************/
   8943 static int
   8944 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   8945 {
   8946 	int32_t  error = 0;
   8947 	uint32_t flash_bank = 0;
   8948 	uint32_t act_offset = 0;
   8949 	uint32_t bank_offset = 0;
   8950 	uint16_t word = 0;
   8951 	uint16_t i = 0;
   8952 
   8953 	/*
   8954 	 * We need to know which is the valid flash bank.  In the event
   8955 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   8956 	 * managing flash_bank.  So it cannot be trusted and needs
   8957 	 * to be updated with each read.
   8958 	 */
   8959 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   8960 	if (error) {
   8961 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   8962 			device_xname(sc->sc_dev)));
   8963 		flash_bank = 0;
   8964 	}
   8965 
   8966 	/*
    8967 	/*
    8968 	 * Adjust the offset if we're on bank 1; the bank size is in
    8969 	 * words, so multiply by 2 to get a byte offset.
    8970 	 */
   8971 
   8972 	error = wm_get_swfwhw_semaphore(sc);
   8973 	if (error) {
   8974 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8975 		    __func__);
   8976 		return error;
   8977 	}
   8978 
   8979 	for (i = 0; i < words; i++) {
   8980 		/* The NVM part needs a byte offset, hence * 2 */
   8981 		act_offset = bank_offset + ((offset + i) * 2);
   8982 		error = wm_read_ich8_word(sc, act_offset, &word);
   8983 		if (error) {
   8984 			aprint_error_dev(sc->sc_dev,
   8985 			    "%s: failed to read NVM\n", __func__);
   8986 			break;
   8987 		}
   8988 		data[i] = word;
   8989 	}
   8990 
   8991 	wm_put_swfwhw_semaphore(sc);
   8992 	return error;
   8993 }
   8994 
   8995 /* iNVM */
   8996 
   8997 static int
   8998 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   8999 {
    9000 	int32_t rv = -1;	/* Fail unless the word is actually found */
   9001 	uint32_t invm_dword;
   9002 	uint16_t i;
   9003 	uint8_t record_type, word_address;
   9004 
   9005 	for (i = 0; i < INVM_SIZE; i++) {
   9006 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   9007 		/* Get record type */
   9008 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   9009 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   9010 			break;
   9011 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   9012 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   9013 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   9014 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   9015 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   9016 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   9017 			if (word_address == address) {
   9018 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   9019 				rv = 0;
   9020 				break;
   9021 			}
   9022 		}
   9023 	}
   9024 
   9025 	return rv;
   9026 }
   9027 
   9028 static int
   9029 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   9030 {
   9031 	int rv = 0;
   9032 	int i;
   9033 
   9034 	for (i = 0; i < words; i++) {
   9035 		switch (offset + i) {
   9036 		case NVM_OFF_MACADDR:
   9037 		case NVM_OFF_MACADDR1:
   9038 		case NVM_OFF_MACADDR2:
   9039 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   9040 			if (rv != 0) {
   9041 				data[i] = 0xffff;
   9042 				rv = -1;
   9043 			}
   9044 			break;
   9045 		case NVM_OFF_CFG2:
   9046 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9047 			if (rv != 0) {
   9048 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   9049 				rv = 0;
   9050 			}
   9051 			break;
   9052 		case NVM_OFF_CFG4:
   9053 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9054 			if (rv != 0) {
   9055 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   9056 				rv = 0;
   9057 			}
   9058 			break;
   9059 		case NVM_OFF_LED_1_CFG:
   9060 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9061 			if (rv != 0) {
   9062 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   9063 				rv = 0;
   9064 			}
   9065 			break;
   9066 		case NVM_OFF_LED_0_2_CFG:
   9067 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9068 			if (rv != 0) {
   9069 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   9070 				rv = 0;
   9071 			}
   9072 			break;
   9073 		case NVM_OFF_ID_LED_SETTINGS:
   9074 			rv = wm_nvm_read_word_invm(sc, offset, data);
   9075 			if (rv != 0) {
   9076 				*data = ID_LED_RESERVED_FFFF;
   9077 				rv = 0;
   9078 			}
   9079 			break;
   9080 		default:
   9081 			DPRINTF(WM_DEBUG_NVM,
   9082 			    ("NVM word 0x%02x is not mapped.\n", offset));
   9083 			*data = NVM_RESERVED_WORD;
   9084 			break;
   9085 		}
   9086 	}
   9087 
   9088 	return rv;
   9089 }
   9090 
   9091 /* Lock, detecting NVM type, validate checksum, version and read */
   9092 
   9093 /*
   9094  * wm_nvm_acquire:
   9095  *
   9096  *	Perform the EEPROM handshake required on some chips.
   9097  */
   9098 static int
   9099 wm_nvm_acquire(struct wm_softc *sc)
   9100 {
   9101 	uint32_t reg;
   9102 	int x;
   9103 	int ret = 0;
   9104 
    9105 	/* Flash-type NVM needs no handshake; always succeeds */
   9106 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   9107 		return 0;
   9108 
   9109 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9110 		ret = wm_get_swfwhw_semaphore(sc);
   9111 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9112 		/* This will also do wm_get_swsm_semaphore() if needed */
   9113 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   9114 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   9115 		ret = wm_get_swsm_semaphore(sc);
   9116 	}
   9117 
   9118 	if (ret) {
   9119 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9120 			__func__);
   9121 		return 1;
   9122 	}
   9123 
   9124 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   9125 		reg = CSR_READ(sc, WMREG_EECD);
   9126 
   9127 		/* Request EEPROM access. */
   9128 		reg |= EECD_EE_REQ;
   9129 		CSR_WRITE(sc, WMREG_EECD, reg);
   9130 
   9131 		/* ..and wait for it to be granted. */
   9132 		for (x = 0; x < 1000; x++) {
   9133 			reg = CSR_READ(sc, WMREG_EECD);
   9134 			if (reg & EECD_EE_GNT)
   9135 				break;
   9136 			delay(5);
   9137 		}
   9138 		if ((reg & EECD_EE_GNT) == 0) {
   9139 			aprint_error_dev(sc->sc_dev,
   9140 			    "could not acquire EEPROM GNT\n");
   9141 			reg &= ~EECD_EE_REQ;
   9142 			CSR_WRITE(sc, WMREG_EECD, reg);
   9143 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9144 				wm_put_swfwhw_semaphore(sc);
   9145 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   9146 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   9147 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   9148 				wm_put_swsm_semaphore(sc);
   9149 			return 1;
   9150 		}
   9151 	}
   9152 
   9153 	return 0;
   9154 }
   9155 
   9156 /*
   9157  * wm_nvm_release:
   9158  *
   9159  *	Release the EEPROM mutex.
   9160  */
   9161 static void
   9162 wm_nvm_release(struct wm_softc *sc)
   9163 {
   9164 	uint32_t reg;
   9165 
    9166 	/* Flash-type NVM needs no handshake; nothing to release */
   9167 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   9168 		return;
   9169 
   9170 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   9171 		reg = CSR_READ(sc, WMREG_EECD);
   9172 		reg &= ~EECD_EE_REQ;
   9173 		CSR_WRITE(sc, WMREG_EECD, reg);
   9174 	}
   9175 
   9176 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9177 		wm_put_swfwhw_semaphore(sc);
   9178 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9179 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   9180 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   9181 		wm_put_swsm_semaphore(sc);
   9182 }
   9183 
   9184 static int
   9185 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   9186 {
   9187 	uint32_t eecd = 0;
   9188 
   9189 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   9190 	    || sc->sc_type == WM_T_82583) {
   9191 		eecd = CSR_READ(sc, WMREG_EECD);
   9192 
   9193 		/* Isolate bits 15 & 16 */
   9194 		eecd = ((eecd >> 15) & 0x03);
   9195 
   9196 		/* If both bits are set, device is Flash type */
   9197 		if (eecd == 0x03)
   9198 			return 0;
   9199 	}
   9200 	return 1;
   9201 }
   9202 
   9203 static int
   9204 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   9205 {
   9206 	uint32_t eec;
   9207 
   9208 	eec = CSR_READ(sc, WMREG_EEC);
   9209 	if ((eec & EEC_FLASH_DETECTED) != 0)
   9210 		return 1;
   9211 
   9212 	return 0;
   9213 }
   9214 
   9215 /*
   9216  * wm_nvm_validate_checksum
   9217  *
   9218  * The checksum is defined as the sum of the first 64 (16 bit) words.
   9219  */
   9220 static int
   9221 wm_nvm_validate_checksum(struct wm_softc *sc)
   9222 {
   9223 	uint16_t checksum;
   9224 	uint16_t eeprom_data;
   9225 #ifdef WM_DEBUG
   9226 	uint16_t csum_wordaddr, valid_checksum;
   9227 #endif
   9228 	int i;
   9229 
   9230 	checksum = 0;
   9231 
   9232 	/* Don't check for I211 */
   9233 	if (sc->sc_type == WM_T_I211)
   9234 		return 0;
   9235 
   9236 #ifdef WM_DEBUG
   9237 	if (sc->sc_type == WM_T_PCH_LPT) {
   9238 		csum_wordaddr = NVM_OFF_COMPAT;
   9239 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   9240 	} else {
   9241 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   9242 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   9243 	}
   9244 
   9245 	/* Dump EEPROM image for debug */
   9246 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   9247 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   9248 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   9249 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   9250 		if ((eeprom_data & valid_checksum) == 0) {
   9251 			DPRINTF(WM_DEBUG_NVM,
   9252 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   9253 				device_xname(sc->sc_dev), eeprom_data,
   9254 				    valid_checksum));
   9255 		}
   9256 	}
   9257 
   9258 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   9259 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   9260 		for (i = 0; i < NVM_SIZE; i++) {
   9261 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   9262 				printf("XXXX ");
   9263 			else
   9264 				printf("%04hx ", eeprom_data);
   9265 			if (i % 8 == 7)
   9266 				printf("\n");
   9267 		}
   9268 	}
   9269 
   9270 #endif /* WM_DEBUG */
   9271 
   9272 	for (i = 0; i < NVM_SIZE; i++) {
   9273 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   9274 			return 1;
   9275 		checksum += eeprom_data;
   9276 	}
   9277 
   9278 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   9279 #ifdef WM_DEBUG
   9280 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   9281 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   9282 #endif
   9283 	}
   9284 
   9285 	return 0;
   9286 }
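
/*
 * Worked example (illustrative): NVM_CHECKSUM is the usual Intel 0xBABA
 * constant, so an image whose first 63 words sum to 0x1234 must store
 * 0xBABA - 0x1234 = 0xA886 in the checksum word for the 64-word sum to
 * come out right modulo 2^16.
 */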
   9287 
   9288 static void
   9289 wm_nvm_version(struct wm_softc *sc)
   9290 {
   9291 	uint16_t major, minor, build, patch;
   9292 	uint16_t uid0, uid1;
   9293 	uint16_t nvm_data;
   9294 	uint16_t off;
   9295 	bool check_version = false;
   9296 	bool check_optionrom = false;
   9297 
   9298 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   9299 	switch (sc->sc_type) {
   9300 	case WM_T_82575:
   9301 	case WM_T_82576:
   9302 	case WM_T_82580:
   9303 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   9304 			check_version = true;
   9305 		break;
   9306 	case WM_T_I211:
   9307 		/* XXX wm_nvm_version_invm(sc); */
   9308 		return;
   9309 	case WM_T_I210:
   9310 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   9311 			/* XXX wm_nvm_version_invm(sc); */
   9312 			return;
   9313 		}
   9314 		/* FALLTHROUGH */
   9315 	case WM_T_I350:
   9316 	case WM_T_I354:
   9317 		check_version = true;
   9318 		check_optionrom = true;
   9319 		break;
   9320 	default:
   9321 		/* XXX Should we print PXE boot agent's version? */
   9322 		return;
   9323 	}
   9324 	if (check_version) {
   9325 		bool have_build = false;
   9326 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   9327 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   9328 		if ((nvm_data & 0x0f00) == 0x0000)
   9329 			minor = nvm_data & 0x00ff;
   9330 		else {
   9331 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   9332 			build = nvm_data & NVM_BUILD_MASK;
   9333 			have_build = true;
   9334 		}
    9335 		/* Convert the hex-coded minor to decimal */
   9336 		minor = (minor / 16) * 10 + (minor % 16);
   9337 
   9338 		aprint_verbose(", version %d.%d", major, minor);
   9339 		if (have_build)
   9340 			aprint_verbose(" build %d", build);
   9341 		sc->sc_nvm_ver_major = major;
   9342 		sc->sc_nvm_ver_minor = minor;
   9343 	}
   9344 	if (check_optionrom) {
   9345 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   9346 		/* Option ROM Version */
   9347 		if ((off != 0x0000) && (off != 0xffff)) {
   9348 			off += NVM_COMBO_VER_OFF;
   9349 			wm_nvm_read(sc, off + 1, 1, &uid1);
   9350 			wm_nvm_read(sc, off, 1, &uid0);
   9351 			if ((uid0 != 0) && (uid0 != 0xffff)
   9352 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   9353 				/* 16bits */
   9354 				major = uid0 >> 8;
   9355 				build = (uid0 << 8) | (uid1 >> 8);
   9356 				patch = uid1 & 0x00ff;
   9357 				aprint_verbose(", option ROM Version %d.%d.%d",
   9358 				    major, build, patch);
   9359 			}
   9360 		}
   9361 	}
   9362 
   9363 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   9364 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   9365 }
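
/*
 * Worked example (illustrative): the minor number is stored with one
 * decimal digit per hex nibble, so the conversion above maps a raw
 * minor of 0x25 to decimal 25: (0x25 / 16) * 10 + (0x25 % 16) = 25.
 */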
   9366 
   9367 /*
   9368  * wm_nvm_read:
   9369  *
   9370  *	Read data from the serial EEPROM.
   9371  */
   9372 static int
   9373 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9374 {
   9375 	int rv;
   9376 
   9377 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   9378 		return 1;
   9379 
   9380 	if (wm_nvm_acquire(sc))
   9381 		return 1;
   9382 
   9383 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   9384 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   9385 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   9386 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   9387 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   9388 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   9389 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   9390 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   9391 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   9392 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   9393 	else
   9394 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   9395 
   9396 	wm_nvm_release(sc);
   9397 	return rv;
   9398 }
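
/*
 * Illustrative usage sketch: the Ethernet address occupies three NVM
 * words starting at NVM_OFF_MACADDR, so a caller can fetch it with:
 *
 *	uint16_t myea[3];
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) != 0)
 *		(fall back or fail the attach)
 */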
   9399 
   9400 /*
   9401  * Hardware semaphores.
    9402  * Very complex...
   9403  */
   9404 
   9405 static int
   9406 wm_get_swsm_semaphore(struct wm_softc *sc)
   9407 {
   9408 	int32_t timeout;
   9409 	uint32_t swsm;
   9410 
   9411 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   9412 		/* Get the SW semaphore. */
   9413 		timeout = sc->sc_nvm_wordsize + 1;
   9414 		while (timeout) {
   9415 			swsm = CSR_READ(sc, WMREG_SWSM);
   9416 
   9417 			if ((swsm & SWSM_SMBI) == 0)
   9418 				break;
   9419 
   9420 			delay(50);
   9421 			timeout--;
   9422 		}
   9423 
   9424 		if (timeout == 0) {
   9425 			aprint_error_dev(sc->sc_dev,
   9426 			    "could not acquire SWSM SMBI\n");
   9427 			return 1;
   9428 		}
   9429 	}
   9430 
   9431 	/* Get the FW semaphore. */
   9432 	timeout = sc->sc_nvm_wordsize + 1;
   9433 	while (timeout) {
   9434 		swsm = CSR_READ(sc, WMREG_SWSM);
   9435 		swsm |= SWSM_SWESMBI;
   9436 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   9437 		/* If we managed to set the bit we got the semaphore. */
   9438 		swsm = CSR_READ(sc, WMREG_SWSM);
   9439 		if (swsm & SWSM_SWESMBI)
   9440 			break;
   9441 
   9442 		delay(50);
   9443 		timeout--;
   9444 	}
   9445 
   9446 	if (timeout == 0) {
   9447 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   9448 		/* Release semaphores */
   9449 		wm_put_swsm_semaphore(sc);
   9450 		return 1;
   9451 	}
   9452 	return 0;
   9453 }
   9454 
   9455 static void
   9456 wm_put_swsm_semaphore(struct wm_softc *sc)
   9457 {
   9458 	uint32_t swsm;
   9459 
   9460 	swsm = CSR_READ(sc, WMREG_SWSM);
   9461 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   9462 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   9463 }
   9464 
   9465 static int
   9466 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   9467 {
   9468 	uint32_t swfw_sync;
   9469 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   9470 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    9471 	int timeout;
   9472 
   9473 	for (timeout = 0; timeout < 200; timeout++) {
   9474 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   9475 			if (wm_get_swsm_semaphore(sc)) {
   9476 				aprint_error_dev(sc->sc_dev,
   9477 				    "%s: failed to get semaphore\n",
   9478 				    __func__);
   9479 				return 1;
   9480 			}
   9481 		}
   9482 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   9483 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   9484 			swfw_sync |= swmask;
   9485 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   9486 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   9487 				wm_put_swsm_semaphore(sc);
   9488 			return 0;
   9489 		}
   9490 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   9491 			wm_put_swsm_semaphore(sc);
   9492 		delay(5000);
   9493 	}
   9494 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   9495 	    device_xname(sc->sc_dev), mask, swfw_sync);
   9496 	return 1;
   9497 }
   9498 
   9499 static void
   9500 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   9501 {
   9502 	uint32_t swfw_sync;
   9503 
   9504 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   9505 		while (wm_get_swsm_semaphore(sc) != 0)
   9506 			continue;
   9507 	}
   9508 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   9509 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   9510 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   9511 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   9512 		wm_put_swsm_semaphore(sc);
   9513 }
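
/*
 * Illustrative usage sketch: wm_get_swfw_semaphore() and
 * wm_put_swfw_semaphore() must be paired on the same resource mask:
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
 *		(touch the shared EEPROM resource)
 *		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 *	}
 */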
   9514 
   9515 static int
   9516 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   9517 {
   9518 	uint32_t ext_ctrl;
    9519 	int timeout;
   9520 
   9521 	for (timeout = 0; timeout < 200; timeout++) {
   9522 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   9523 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   9524 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   9525 
   9526 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   9527 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   9528 			return 0;
   9529 		delay(5000);
   9530 	}
   9531 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   9532 	    device_xname(sc->sc_dev), ext_ctrl);
   9533 	return 1;
   9534 }
   9535 
   9536 static void
   9537 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   9538 {
   9539 	uint32_t ext_ctrl;
   9540 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   9541 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   9542 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   9543 }
   9544 
   9545 static int
   9546 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   9547 {
   9548 	int i = 0;
   9549 	uint32_t reg;
   9550 
   9551 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   9552 	do {
   9553 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   9554 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   9555 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   9556 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   9557 			break;
   9558 		delay(2*1000);
   9559 		i++;
   9560 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   9561 
   9562 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   9563 		wm_put_hw_semaphore_82573(sc);
   9564 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   9565 		    device_xname(sc->sc_dev));
   9566 		return -1;
   9567 	}
   9568 
   9569 	return 0;
   9570 }
   9571 
   9572 static void
   9573 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   9574 {
   9575 	uint32_t reg;
   9576 
   9577 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   9578 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   9579 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   9580 }
   9581 
   9582 /*
   9583  * Management mode and power management related subroutines.
   9584  * BMC, AMT, suspend/resume and EEE.
   9585  */
   9586 
   9587 static int
   9588 wm_check_mng_mode(struct wm_softc *sc)
   9589 {
   9590 	int rv;
   9591 
   9592 	switch (sc->sc_type) {
   9593 	case WM_T_ICH8:
   9594 	case WM_T_ICH9:
   9595 	case WM_T_ICH10:
   9596 	case WM_T_PCH:
   9597 	case WM_T_PCH2:
   9598 	case WM_T_PCH_LPT:
   9599 		rv = wm_check_mng_mode_ich8lan(sc);
   9600 		break;
   9601 	case WM_T_82574:
   9602 	case WM_T_82583:
   9603 		rv = wm_check_mng_mode_82574(sc);
   9604 		break;
   9605 	case WM_T_82571:
   9606 	case WM_T_82572:
   9607 	case WM_T_82573:
   9608 	case WM_T_80003:
   9609 		rv = wm_check_mng_mode_generic(sc);
   9610 		break;
   9611 	default:
    9612 		/* nothing to do */
   9613 		rv = 0;
   9614 		break;
   9615 	}
   9616 
   9617 	return rv;
   9618 }
   9619 
   9620 static int
   9621 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   9622 {
   9623 	uint32_t fwsm;
   9624 
   9625 	fwsm = CSR_READ(sc, WMREG_FWSM);
   9626 
   9627 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   9628 		return 1;
   9629 
   9630 	return 0;
   9631 }
   9632 
   9633 static int
   9634 wm_check_mng_mode_82574(struct wm_softc *sc)
   9635 {
   9636 	uint16_t data;
   9637 
   9638 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   9639 
   9640 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   9641 		return 1;
   9642 
   9643 	return 0;
   9644 }
   9645 
   9646 static int
   9647 wm_check_mng_mode_generic(struct wm_softc *sc)
   9648 {
   9649 	uint32_t fwsm;
   9650 
   9651 	fwsm = CSR_READ(sc, WMREG_FWSM);
   9652 
   9653 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   9654 		return 1;
   9655 
   9656 	return 0;
   9657 }
   9658 
   9659 static int
   9660 wm_enable_mng_pass_thru(struct wm_softc *sc)
   9661 {
   9662 	uint32_t manc, fwsm, factps;
   9663 
   9664 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   9665 		return 0;
   9666 
   9667 	manc = CSR_READ(sc, WMREG_MANC);
   9668 
   9669 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   9670 		device_xname(sc->sc_dev), manc));
   9671 	if ((manc & MANC_RECV_TCO_EN) == 0)
   9672 		return 0;
   9673 
   9674 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   9675 		fwsm = CSR_READ(sc, WMREG_FWSM);
   9676 		factps = CSR_READ(sc, WMREG_FACTPS);
   9677 		if (((factps & FACTPS_MNGCG) == 0)
   9678 		    && ((fwsm & FWSM_MODE_MASK)
   9679 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   9680 			return 1;
    9681 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9682 		uint16_t data;
   9683 
   9684 		factps = CSR_READ(sc, WMREG_FACTPS);
   9685 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   9686 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   9687 			device_xname(sc->sc_dev), factps, data));
   9688 		if (((factps & FACTPS_MNGCG) == 0)
   9689 		    && ((data & NVM_CFG2_MNGM_MASK)
   9690 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   9691 			return 1;
   9692 	} else if (((manc & MANC_SMBUS_EN) != 0)
   9693 	    && ((manc & MANC_ASF_EN) == 0))
   9694 		return 1;
   9695 
   9696 	return 0;
   9697 }
   9698 
   9699 static int
   9700 wm_check_reset_block(struct wm_softc *sc)
   9701 {
   9702 	uint32_t reg;
   9703 
   9704 	switch (sc->sc_type) {
   9705 	case WM_T_ICH8:
   9706 	case WM_T_ICH9:
   9707 	case WM_T_ICH10:
   9708 	case WM_T_PCH:
   9709 	case WM_T_PCH2:
   9710 	case WM_T_PCH_LPT:
   9711 		reg = CSR_READ(sc, WMREG_FWSM);
   9712 		if ((reg & FWSM_RSPCIPHY) != 0)
   9713 			return 0;
   9714 		else
   9715 			return -1;
   9716 		break;
   9717 	case WM_T_82571:
   9718 	case WM_T_82572:
   9719 	case WM_T_82573:
   9720 	case WM_T_82574:
   9721 	case WM_T_82583:
   9722 	case WM_T_80003:
   9723 		reg = CSR_READ(sc, WMREG_MANC);
   9724 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   9725 			return -1;
   9726 		else
   9727 			return 0;
   9728 		break;
   9729 	default:
   9730 		/* no problem */
   9731 		break;
   9732 	}
   9733 
   9734 	return 0;
   9735 }
   9736 
   9737 static void
   9738 wm_get_hw_control(struct wm_softc *sc)
   9739 {
   9740 	uint32_t reg;
   9741 
   9742 	switch (sc->sc_type) {
   9743 	case WM_T_82573:
   9744 		reg = CSR_READ(sc, WMREG_SWSM);
   9745 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   9746 		break;
   9747 	case WM_T_82571:
   9748 	case WM_T_82572:
   9749 	case WM_T_82574:
   9750 	case WM_T_82583:
   9751 	case WM_T_80003:
   9752 	case WM_T_ICH8:
   9753 	case WM_T_ICH9:
   9754 	case WM_T_ICH10:
   9755 	case WM_T_PCH:
   9756 	case WM_T_PCH2:
   9757 	case WM_T_PCH_LPT:
   9758 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9759 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   9760 		break;
   9761 	default:
   9762 		break;
   9763 	}
   9764 }
   9765 
   9766 static void
   9767 wm_release_hw_control(struct wm_softc *sc)
   9768 {
   9769 	uint32_t reg;
   9770 
   9771 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   9772 		return;
   9773 
   9774 	if (sc->sc_type == WM_T_82573) {
   9775 		reg = CSR_READ(sc, WMREG_SWSM);
   9776 		reg &= ~SWSM_DRV_LOAD;
    9777 		CSR_WRITE(sc, WMREG_SWSM, reg);
   9778 	} else {
   9779 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9780 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   9781 	}
   9782 }
   9783 
   9784 static void
   9785 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   9786 {
   9787 	uint32_t reg;
   9788 
   9789 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   9790 
   9791 	if (on != 0)
   9792 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   9793 	else
   9794 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   9795 
   9796 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   9797 }
   9798 
   9799 static void
   9800 wm_smbustopci(struct wm_softc *sc)
   9801 {
   9802 	uint32_t fwsm;
   9803 
   9804 	fwsm = CSR_READ(sc, WMREG_FWSM);
   9805 	if (((fwsm & FWSM_FW_VALID) == 0)
   9806 	    && ((wm_check_reset_block(sc) == 0))) {
   9807 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   9808 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   9809 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9810 		CSR_WRITE_FLUSH(sc);
   9811 		delay(10);
   9812 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   9813 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9814 		CSR_WRITE_FLUSH(sc);
   9815 		delay(50*1000);
   9816 
   9817 		/*
   9818 		 * Gate automatic PHY configuration by hardware on non-managed
   9819 		 * 82579
   9820 		 */
   9821 		if (sc->sc_type == WM_T_PCH2)
   9822 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   9823 	}
   9824 }
   9825 
   9826 static void
   9827 wm_init_manageability(struct wm_softc *sc)
   9828 {
   9829 
   9830 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   9831 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   9832 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   9833 
   9834 		/* Disable hardware interception of ARP */
   9835 		manc &= ~MANC_ARP_EN;
   9836 
   9837 		/* Enable receiving management packets to the host */
   9838 		if (sc->sc_type >= WM_T_82571) {
   9839 			manc |= MANC_EN_MNG2HOST;
    9840 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   9841 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   9842 		}
   9843 
   9844 		CSR_WRITE(sc, WMREG_MANC, manc);
   9845 	}
   9846 }
   9847 
   9848 static void
   9849 wm_release_manageability(struct wm_softc *sc)
   9850 {
   9851 
   9852 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   9853 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   9854 
   9855 		manc |= MANC_ARP_EN;
   9856 		if (sc->sc_type >= WM_T_82571)
   9857 			manc &= ~MANC_EN_MNG2HOST;
   9858 
   9859 		CSR_WRITE(sc, WMREG_MANC, manc);
   9860 	}
   9861 }
   9862 
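/*
 * wm_get_wakeup:
 *
 *	Record the chip's AMT, ARC subsystem, ASF firmware and
 *	manageability capabilities in sc_flags.
 */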
   9863 static void
   9864 wm_get_wakeup(struct wm_softc *sc)
   9865 {
   9866 
   9867 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   9868 	switch (sc->sc_type) {
   9869 	case WM_T_82573:
   9870 	case WM_T_82583:
   9871 		sc->sc_flags |= WM_F_HAS_AMT;
   9872 		/* FALLTHROUGH */
   9873 	case WM_T_80003:
   9874 	case WM_T_82541:
   9875 	case WM_T_82547:
   9876 	case WM_T_82571:
   9877 	case WM_T_82572:
   9878 	case WM_T_82574:
   9879 	case WM_T_82575:
   9880 	case WM_T_82576:
   9881 	case WM_T_82580:
   9882 	case WM_T_I350:
   9883 	case WM_T_I354:
   9884 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   9885 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   9886 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   9887 		break;
   9888 	case WM_T_ICH8:
   9889 	case WM_T_ICH9:
   9890 	case WM_T_ICH10:
   9891 	case WM_T_PCH:
   9892 	case WM_T_PCH2:
   9893 	case WM_T_PCH_LPT:
   9894 		sc->sc_flags |= WM_F_HAS_AMT;
   9895 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   9896 		break;
   9897 	default:
   9898 		break;
   9899 	}
   9900 
   9901 	/* 1: HAS_MANAGE */
   9902 	if (wm_enable_mng_pass_thru(sc) != 0)
   9903 		sc->sc_flags |= WM_F_HAS_MANAGE;
   9904 
   9905 #ifdef WM_DEBUG
   9906 	printf("\n");
   9907 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   9908 		printf("HAS_AMT,");
   9909 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   9910 		printf("ARC_SUBSYS_VALID,");
   9911 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   9912 		printf("ASF_FIRMWARE_PRES,");
   9913 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   9914 		printf("HAS_MANAGE,");
   9915 	printf("\n");
   9916 #endif
    9917 	/*
    9918 	 * Note that the WOL flags are only set after the EEPROM/NVM
    9919 	 * reset code has run.
    9920 	 */
   9921 }
   9922 
   9923 #ifdef WM_WOL
   9924 /* WOL in the newer chipset interfaces (pchlan) */
   9925 static void
   9926 wm_enable_phy_wakeup(struct wm_softc *sc)
   9927 {
   9928 #if 0
   9929 	uint16_t preg;
   9930 
   9931 	/* Copy MAC RARs to PHY RARs */
   9932 
   9933 	/* Copy MAC MTA to PHY MTA */
   9934 
   9935 	/* Configure PHY Rx Control register */
   9936 
   9937 	/* Enable PHY wakeup in MAC register */
   9938 
   9939 	/* Configure and enable PHY wakeup in PHY registers */
   9940 
   9941 	/* Activate PHY wakeup */
   9942 
   9943 	/* XXX */
   9944 #endif
   9945 }
   9946 
   9947 /* Power down workaround on D3 */
   9948 static void
   9949 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   9950 {
   9951 	uint32_t reg;
   9952 	int i;
   9953 
   9954 	for (i = 0; i < 2; i++) {
   9955 		/* Disable link */
   9956 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   9957 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   9958 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   9959 
   9960 		/*
   9961 		 * Call gig speed drop workaround on Gig disable before
   9962 		 * accessing any PHY registers
   9963 		 */
   9964 		if (sc->sc_type == WM_T_ICH8)
   9965 			wm_gig_downshift_workaround_ich8lan(sc);
   9966 
   9967 		/* Write VR power-down enable */
   9968 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   9969 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   9970 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   9971 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   9972 
   9973 		/* Read it back and test */
   9974 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   9975 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   9976 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   9977 			break;
   9978 
   9979 		/* Issue PHY reset and repeat at most one more time */
   9980 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9981 	}
   9982 }
   9983 
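/*
 * wm_enable_wakeup:
 *
 *	Program the wakeup filter and PCI power management registers so
 *	that the device can wake the system (e.g. on a magic packet).
 */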
   9984 static void
   9985 wm_enable_wakeup(struct wm_softc *sc)
   9986 {
   9987 	uint32_t reg, pmreg;
   9988 	pcireg_t pmode;
   9989 
   9990 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   9991 		&pmreg, NULL) == 0)
   9992 		return;
   9993 
   9994 	/* Advertise the wakeup capability */
   9995 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   9996 	    | CTRL_SWDPIN(3));
   9997 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   9998 
   9999 	/* ICH workaround */
   10000 	switch (sc->sc_type) {
   10001 	case WM_T_ICH8:
   10002 	case WM_T_ICH9:
   10003 	case WM_T_ICH10:
   10004 	case WM_T_PCH:
   10005 	case WM_T_PCH2:
   10006 	case WM_T_PCH_LPT:
   10007 		/* Disable gig during WOL */
   10008 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   10009 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   10010 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   10011 		if (sc->sc_type == WM_T_PCH)
   10012 			wm_gmii_reset(sc);
   10013 
   10014 		/* Power down workaround */
   10015 		if (sc->sc_phytype == WMPHY_82577) {
   10016 			struct mii_softc *child;
   10017 
   10018 			/* Assume that the PHY is copper */
   10019 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10020 			if (child->mii_mpd_rev <= 2)
    10021 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
    10022 				    (768 << 5) | 25, 0x0444); /* magic num: page 768, reg 25 */
   10023 		}
   10024 		break;
   10025 	default:
   10026 		break;
   10027 	}
   10028 
   10029 	/* Keep the laser running on fiber adapters */
   10030 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   10031 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   10032 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10033 		reg |= CTRL_EXT_SWDPIN(3);
   10034 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10035 	}
   10036 
   10037 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    10038 #if 0	/* Also wake on multicast packets */
   10039 	reg |= WUFC_MC;
   10040 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   10041 #endif
   10042 
   10043 	if (sc->sc_type == WM_T_PCH) {
   10044 		wm_enable_phy_wakeup(sc);
   10045 	} else {
   10046 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   10047 		CSR_WRITE(sc, WMREG_WUFC, reg);
   10048 	}
   10049 
    10050 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
    10051 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
    10052 		|| (sc->sc_type == WM_T_PCH2))
    10053 	    && (sc->sc_phytype == WMPHY_IGP_3))
    10054 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   10055 
   10056 	/* Request PME */
   10057 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   10058 #if 0
   10059 	/* Disable WOL */
   10060 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   10061 #else
   10062 	/* For WOL */
   10063 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   10064 #endif
   10065 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   10066 }
   10067 #endif /* WM_WOL */
   10068 
    10069 /* EEE (Energy Efficient Ethernet) */
   10070 
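/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet (IEEE 802.3az) on
 *	I350, according to the WM_F_EEE flag.
 */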
   10071 static void
   10072 wm_set_eee_i350(struct wm_softc *sc)
   10073 {
   10074 	uint32_t ipcnfg, eeer;
   10075 
   10076 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   10077 	eeer = CSR_READ(sc, WMREG_EEER);
   10078 
   10079 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   10080 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   10081 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   10082 		    | EEER_LPI_FC);
   10083 	} else {
   10084 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   10085 		ipcnfg &= ~IPCNFG_10BASE_TE;
   10086 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   10087 		    | EEER_LPI_FC);
   10088 	}
   10089 
   10090 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   10091 	CSR_WRITE(sc, WMREG_EEER, eeer);
   10092 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   10093 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   10094 }
   10095 
    10096 /*
    10097  * Workarounds (mainly PHY related).
    10098  * Most PHY workarounds live in the PHY drivers; these also need MAC access.
    10099  */
   10100 
    10101 /* Workaround for 82566 Kumeran PCS lock loss */
   10102 static void
   10103 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   10104 {
   10105 	int miistatus, active, i;
   10106 	int reg;
   10107 
   10108 	miistatus = sc->sc_mii.mii_media_status;
   10109 
    10110 	/* If the link is not up, do nothing */
    10111 	if ((miistatus & IFM_ACTIVE) == 0)
    10112 		return;
   10113 
   10114 	active = sc->sc_mii.mii_media_active;
   10115 
    10116 	/* Nothing to do if the link speed is not 1Gbps */
   10117 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   10118 		return;
   10119 
   10120 	for (i = 0; i < 10; i++) {
    10121 		/* Read twice to get a stable value */
    10122 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
    10123 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
    10124 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
    10125 			goto out;	/* PCS lock acquired - GOOD! */
   10126 
   10127 		/* Reset the PHY */
   10128 		wm_gmii_reset(sc);
   10129 		delay(5*1000);
   10130 	}
   10131 
   10132 	/* Disable GigE link negotiation */
   10133 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   10134 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   10135 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   10136 
   10137 	/*
   10138 	 * Call gig speed drop workaround on Gig disable before accessing
   10139 	 * any PHY registers.
   10140 	 */
   10141 	wm_gig_downshift_workaround_ich8lan(sc);
   10142 
   10143 out:
   10144 	return;
   10145 }
   10146 
    10147 /* WOL from S5 stops working; toggle Kumeran near-end loopback as a fix */
   10148 static void
   10149 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   10150 {
   10151 	uint16_t kmrn_reg;
   10152 
   10153 	/* Only for igp3 */
   10154 	if (sc->sc_phytype == WMPHY_IGP_3) {
   10155 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   10156 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   10157 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   10158 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   10159 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   10160 	}
   10161 }
   10162 
    10163 /*
    10164  * Workaround for the PCH PHYs.
    10165  * XXX Should this be moved to a new PHY driver?
    10166  */
   10167 static void
   10168 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   10169 {
   10170 	if (sc->sc_phytype == WMPHY_82577)
   10171 		wm_set_mdio_slow_mode_hv(sc);
   10172 
   10173 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   10174 
   10175 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   10176 
   10177 	/* 82578 */
   10178 	if (sc->sc_phytype == WMPHY_82578) {
   10179 		/* PCH rev. < 3 */
   10180 		if (sc->sc_rev < 3) {
   10181 			/* XXX 6 bit shift? Why? Is it page2? */
   10182 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   10183 			    0x66c0);
   10184 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   10185 			    0xffff);
   10186 		}
   10187 
   10188 		/* XXX phy rev. < 2 */
   10189 	}
   10190 
   10191 	/* Select page 0 */
   10192 
   10193 	/* XXX acquire semaphore */
   10194 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   10195 	/* XXX release semaphore */
   10196 
    10197 	/*
    10198 	 * Configure the K1 Si workaround during PHY reset, assuming the
    10199 	 * link is up, so that K1 is disabled when the link is at 1Gbps.
    10200 	 */
   10201 	wm_k1_gig_workaround_hv(sc, 1);
   10202 }
   10203 
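/*
 * wm_lv_phy_workaround_ich8lan:
 *
 *	Workaround for the 82579 (PCH2) PHY: force slow MDIO mode.
 */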
   10204 static void
   10205 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   10206 {
   10207 
   10208 	wm_set_mdio_slow_mode_hv(sc);
   10209 }
   10210 
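/*
 * wm_k1_gig_workaround_hv:
 *
 *	K1 power-state workaround: apply the link stall fix and, if the
 *	link is up, disable K1; otherwise keep the K1 setting from the
 *	NVM.
 */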
   10211 static void
   10212 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   10213 {
   10214 	int k1_enable = sc->sc_nvm_k1_enabled;
   10215 
   10216 	/* XXX acquire semaphore */
   10217 
   10218 	if (link) {
   10219 		k1_enable = 0;
   10220 
   10221 		/* Link stall fix for link up */
   10222 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   10223 	} else {
   10224 		/* Link stall fix for link down */
   10225 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   10226 	}
   10227 
   10228 	wm_configure_k1_ich8lan(sc, k1_enable);
   10229 
   10230 	/* XXX release semaphore */
   10231 }
   10232 
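/*
 * wm_set_mdio_slow_mode_hv:
 *
 *	Put the Kumeran MDIO interface into slow mode (used for the
 *	82577 PHY).
 */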
   10233 static void
   10234 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   10235 {
   10236 	uint32_t reg;
   10237 
   10238 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   10239 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   10240 	    reg | HV_KMRN_MDIO_SLOW);
   10241 }
   10242 
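/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable the K1 power state in the Kumeran interface.
 *	The MAC speed bits are temporarily forced while the new setting
 *	takes effect.
 */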
   10243 static void
   10244 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   10245 {
   10246 	uint32_t ctrl, ctrl_ext, tmp;
   10247 	uint16_t kmrn_reg;
   10248 
   10249 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   10250 
   10251 	if (k1_enable)
   10252 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   10253 	else
   10254 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   10255 
   10256 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   10257 
   10258 	delay(20);
   10259 
   10260 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10261 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10262 
   10263 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   10264 	tmp |= CTRL_FRCSPD;
   10265 
   10266 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   10267 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   10268 	CSR_WRITE_FLUSH(sc);
   10269 	delay(20);
   10270 
   10271 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   10272 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10273 	CSR_WRITE_FLUSH(sc);
   10274 	delay(20);
   10275 }
   10276 
    10277 /* Special case: the 82575 needs manual init after reset ... */
   10278 static void
   10279 wm_reset_init_script_82575(struct wm_softc *sc)
   10280 {
    10281 	/*
    10282 	 * Remark: this is untested code - we have no board without EEPROM.
    10283 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    10284 	 */
   10285 
   10286 	/* SerDes configuration via SERDESCTRL */
   10287 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   10288 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   10289 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   10290 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   10291 
   10292 	/* CCM configuration via CCMCTL register */
   10293 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   10294 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   10295 
   10296 	/* PCIe lanes configuration */
   10297 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   10298 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   10299 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   10300 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   10301 
   10302 	/* PCIe PLL Configuration */
   10303 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   10304 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   10305 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   10306 }
   10307 
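/*
 * wm_reset_mdicnfg_82580:
 *
 *	Restore the MDIO destination and shared-MDIO settings from the
 *	NVM after a reset.  Only required in SGMII mode.
 */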
   10308 static void
   10309 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   10310 {
   10311 	uint32_t reg;
   10312 	uint16_t nvmword;
   10313 	int rv;
   10314 
   10315 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10316 		return;
   10317 
   10318 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   10319 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   10320 	if (rv != 0) {
   10321 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   10322 		    __func__);
   10323 		return;
   10324 	}
   10325 
   10326 	reg = CSR_READ(sc, WMREG_MDICNFG);
   10327 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   10328 		reg |= MDICNFG_DEST;
   10329 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   10330 		reg |= MDICNFG_COM_MDIO;
   10331 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   10332 }
   10333 
   10334 /*
   10335  * I210 Errata 25 and I211 Errata 10
   10336  * Slow System Clock.
   10337  */
   10338 static void
   10339 wm_pll_workaround_i210(struct wm_softc *sc)
   10340 {
   10341 	uint32_t mdicnfg, wuc;
   10342 	uint32_t reg;
   10343 	pcireg_t pcireg;
   10344 	uint32_t pmreg;
   10345 	uint16_t nvmword, tmp_nvmword;
   10346 	int phyval;
   10347 	bool wa_done = false;
   10348 	int i;
   10349 
   10350 	/* Save WUC and MDICNFG registers */
   10351 	wuc = CSR_READ(sc, WMREG_WUC);
   10352 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   10353 
   10354 	reg = mdicnfg & ~MDICNFG_DEST;
   10355 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   10356 
   10357 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   10358 		nvmword = INVM_DEFAULT_AL;
   10359 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   10360 
   10361 	/* Get Power Management cap offset */
   10362 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   10363 		&pmreg, NULL) == 0)
   10364 		return;
   10365 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   10366 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   10367 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   10368 
   10369 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   10370 			break; /* OK */
   10371 		}
   10372 
   10373 		wa_done = true;
   10374 		/* Directly reset the internal PHY */
   10375 		reg = CSR_READ(sc, WMREG_CTRL);
   10376 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   10377 
   10378 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10379 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   10380 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10381 
   10382 		CSR_WRITE(sc, WMREG_WUC, 0);
   10383 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   10384 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   10385 
   10386 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   10387 		    pmreg + PCI_PMCSR);
   10388 		pcireg |= PCI_PMCSR_STATE_D3;
   10389 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   10390 		    pmreg + PCI_PMCSR, pcireg);
   10391 		delay(1000);
   10392 		pcireg &= ~PCI_PMCSR_STATE_D3;
   10393 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   10394 		    pmreg + PCI_PMCSR, pcireg);
   10395 
   10396 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   10397 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   10398 
   10399 		/* Restore WUC register */
   10400 		CSR_WRITE(sc, WMREG_WUC, wuc);
   10401 	}
   10402 
   10403 	/* Restore MDICNFG setting */
   10404 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   10405 	if (wa_done)
   10406 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   10407 }
   10408