Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.279
      1 /*	$NetBSD: if_wm.c,v 1.279 2014/07/22 04:20:39 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Rework how parameters are loaded from the EEPROM.
     76  */
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.279 2014/07/22 04:20:39 msaitoh Exp $");
     80 
     81 #include <sys/param.h>
     82 #include <sys/systm.h>
     83 #include <sys/callout.h>
     84 #include <sys/mbuf.h>
     85 #include <sys/malloc.h>
     86 #include <sys/kernel.h>
     87 #include <sys/socket.h>
     88 #include <sys/ioctl.h>
     89 #include <sys/errno.h>
     90 #include <sys/device.h>
     91 #include <sys/queue.h>
     92 #include <sys/syslog.h>
     93 
     94 #include <sys/rnd.h>
     95 
     96 #include <net/if.h>
     97 #include <net/if_dl.h>
     98 #include <net/if_media.h>
     99 #include <net/if_ether.h>
    100 
    101 #include <net/bpf.h>
    102 
    103 #include <netinet/in.h>			/* XXX for struct ip */
    104 #include <netinet/in_systm.h>		/* XXX for struct ip */
    105 #include <netinet/ip.h>			/* XXX for struct ip */
    106 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    107 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    108 
    109 #include <sys/bus.h>
    110 #include <sys/intr.h>
    111 #include <machine/endian.h>
    112 
    113 #include <dev/mii/mii.h>
    114 #include <dev/mii/miivar.h>
    115 #include <dev/mii/miidevs.h>
    116 #include <dev/mii/mii_bitbang.h>
    117 #include <dev/mii/ikphyreg.h>
    118 #include <dev/mii/igphyreg.h>
    119 #include <dev/mii/igphyvar.h>
    120 #include <dev/mii/inbmphyreg.h>
    121 
    122 #include <dev/pci/pcireg.h>
    123 #include <dev/pci/pcivar.h>
    124 #include <dev/pci/pcidevs.h>
    125 
    126 #include <dev/pci/if_wmreg.h>
    127 #include <dev/pci/if_wmvar.h>
    128 
#ifdef WM_DEBUG
/* Debug class bits; OR'ed together in wm_debug and tested by DPRINTF(). */
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

/*
 * DPRINTF(class, (fmt, args...)): print iff the class bit is enabled.
 * Wrapped in do { } while (0) so the internal "if" cannot capture an
 * "else" that follows the macro at a call site (dangling-else hazard
 * in the previous bare-if form).
 */
#define	DPRINTF(x, y)							\
do {									\
	if (wm_debug & (x))						\
		printf y;						\
} while (/*CONSTCOND*/0)
#else
#define	DPRINTF(x, y)	do {} while (/*CONSTCOND*/0)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif
    147 
    148 /*
    149  * Transmit descriptor list size.  Due to errata, we can only have
    150  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    151  * on >= 82544.  We tell the upper layers that they can queue a lot
    152  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    153  * of them at a time.
    154  *
    155  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    156  * chains containing many small mbufs have been observed in zero-copy
    157  * situations with jumbo frames.
    158  */
    159 #define	WM_NTXSEGS		256
    160 #define	WM_IFQUEUELEN		256
    161 #define	WM_TXQUEUELEN_MAX	64
    162 #define	WM_TXQUEUELEN_MAX_82547	16
    163 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
    164 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
    165 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
    166 #define	WM_NTXDESC_82542	256
    167 #define	WM_NTXDESC_82544	4096
    168 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
    169 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
    170 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
    171 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
    172 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
    173 
    174 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    175 
    176 /*
    177  * Receive descriptor list size.  We have one Rx buffer for normal
    178  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    179  * packet.  We allocate 256 receive descriptors, each with a 2k
    180  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    181  */
    182 #define	WM_NRXDESC		256
    183 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    184 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    185 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    186 
    187 /*
    188  * Control structures are DMA'd to the i82542 chip.  We allocate them in
    189  * a single clump that maps to a single DMA segment to make several things
    190  * easier.
    191  */
    192 struct wm_control_data_82544 {
    193 	/*
    194 	 * The receive descriptors.
    195 	 */
    196 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
    197 
    198 	/*
    199 	 * The transmit descriptors.  Put these at the end, because
    200 	 * we might use a smaller number of them.
    201 	 */
    202 	union {
    203 		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
    204 		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
    205 	} wdc_u;
    206 };
    207 
/* Smaller control-data clump for chips limited to 256 Tx descriptors. */
struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

/* Byte offsets of descriptors within the control-data clump. */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
    216 
    217 /*
    218  * Software state for transmit jobs.
    219  */
    220 struct wm_txsoft {
    221 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    222 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    223 	int txs_firstdesc;		/* first descriptor in packet */
    224 	int txs_lastdesc;		/* last descriptor in packet */
    225 	int txs_ndesc;			/* # of descriptors used */
    226 };
    227 
    228 /*
    229  * Software state for receive buffers.  Each descriptor gets a
    230  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    231  * more than one buffer, we chain them together.
    232  */
    233 struct wm_rxsoft {
    234 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    235 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    236 };
    237 
    238 #define WM_LINKUP_TIMEOUT	50
    239 
    240 static uint16_t swfwphysem[] = {
    241 	SWFW_PHY0_SM,
    242 	SWFW_PHY1_SM,
    243 	SWFW_PHY2_SM,
    244 	SWFW_PHY3_SM
    245 };
    246 
    247 /*
    248  * Software state per device.
    249  */
    250 struct wm_softc {
    251 	device_t sc_dev;		/* generic device information */
    252 	bus_space_tag_t sc_st;		/* bus space tag */
    253 	bus_space_handle_t sc_sh;	/* bus space handle */
    254 	bus_size_t sc_ss;		/* bus space size */
    255 	bus_space_tag_t sc_iot;		/* I/O space tag */
    256 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    257 	bus_size_t sc_ios;		/* I/O space size */
    258 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    259 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    260 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    261 
    262 	struct ethercom sc_ethercom;	/* ethernet common data */
    263 	struct mii_data sc_mii;		/* MII/media information */
    264 
    265 	pci_chipset_tag_t sc_pc;
    266 	pcitag_t sc_pcitag;
    267 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    268 	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */
    269 
    270 	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
    271 	wm_chip_type sc_type;		/* MAC type */
    272 	int sc_rev;			/* MAC revision */
    273 	wm_phy_type sc_phytype;		/* PHY type */
    274 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    275 	int sc_flags;			/* flags; see below */
    276 	int sc_if_flags;		/* last if_flags */
    277 	int sc_flowflags;		/* 802.3x flow control flags */
    278 	int sc_align_tweak;
    279 
    280 	void *sc_ih;			/* interrupt cookie */
    281 	callout_t sc_tick_ch;		/* tick callout */
    282 	bool sc_stopping;
    283 
    284 	int sc_ee_addrbits;		/* EEPROM address bits */
    285 	int sc_ich8_flash_base;
    286 	int sc_ich8_flash_bank_size;
    287 	int sc_nvm_k1_enabled;
    288 
    289 	/*
    290 	 * Software state for the transmit and receive descriptors.
    291 	 */
    292 	int sc_txnum;			/* must be a power of two */
    293 	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
    294 	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
    295 
    296 	/*
    297 	 * Control data structures.
    298 	 */
    299 	int sc_ntxdesc;			/* must be a power of two */
    300 	struct wm_control_data_82544 *sc_control_data;
    301 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
    302 	bus_dma_segment_t sc_cd_seg;	/* control data segment */
    303 	int sc_cd_rseg;			/* real number of control segment */
    304 	size_t sc_cd_size;		/* control data size */
    305 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
    306 #define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
    307 #define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
    308 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
    309 
    310 #ifdef WM_EVENT_COUNTERS
    311 	/* Event counters. */
    312 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
    313 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
    314 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
    315 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
    316 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
    317 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
    318 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    319 
    320 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
    321 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
    322 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
    323 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
    324 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
    325 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
    326 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
    327 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
    328 
    329 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    330 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
    331 
    332 	struct evcnt sc_ev_tu;		/* Tx underrun */
    333 
    334 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    335 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    336 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    337 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    338 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    339 #endif /* WM_EVENT_COUNTERS */
    340 
    341 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
    342 
    343 	int	sc_txfree;		/* number of free Tx descriptors */
    344 	int	sc_txnext;		/* next ready Tx descriptor */
    345 
    346 	int	sc_txsfree;		/* number of free Tx jobs */
    347 	int	sc_txsnext;		/* next free Tx job */
    348 	int	sc_txsdirty;		/* dirty Tx jobs */
    349 
    350 	/* These 5 variables are used only on the 82547. */
    351 	int	sc_txfifo_size;		/* Tx FIFO size */
    352 	int	sc_txfifo_head;		/* current head of FIFO */
    353 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
    354 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
    355 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    356 
    357 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
    358 
    359 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
    360 	int	sc_rxdiscard;
    361 	int	sc_rxlen;
    362 	struct mbuf *sc_rxhead;
    363 	struct mbuf *sc_rxtail;
    364 	struct mbuf **sc_rxtailp;
    365 
    366 	uint32_t sc_ctrl;		/* prototype CTRL register */
    367 #if 0
    368 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    369 #endif
    370 	uint32_t sc_icr;		/* prototype interrupt bits */
    371 	uint32_t sc_itr;		/* prototype intr throttling reg */
    372 	uint32_t sc_tctl;		/* prototype TCTL register */
    373 	uint32_t sc_rctl;		/* prototype RCTL register */
    374 	uint32_t sc_txcw;		/* prototype TXCW register */
    375 	uint32_t sc_tipg;		/* prototype TIPG register */
    376 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    377 	uint32_t sc_pba;		/* prototype PBA register */
    378 
    379 	int sc_tbi_linkup;		/* TBI link status */
    380 	int sc_tbi_anegticks;		/* autonegotiation ticks */
    381 	int sc_tbi_ticks;		/* tbi ticks */
    382 	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
    383 	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */
    384 
    385 	int sc_mchash_type;		/* multicast filter offset */
    386 
    387 	krndsource_t rnd_source;	/* random source */
    388 
    389 	kmutex_t *sc_txrx_lock;		/* lock for tx/rx operations */
    390 					/* XXX need separation? */
    391 };
    392 
/*
 * Tx/Rx lock primitives.  The lock may be NULL (non-MPSAFE build), in
 * which case WM_LOCK/WM_UNLOCK are no-ops and WM_LOCKED is true.
 *
 * WM_LOCK/WM_UNLOCK are wrapped in do { } while (0) so the internal
 * "if" cannot capture an "else" following the macro at a call site
 * (dangling-else hazard in the previous bare-if form).
 */
#define WM_LOCK(_sc)							\
do {									\
	if ((_sc)->sc_txrx_lock)					\
		mutex_enter((_sc)->sc_txrx_lock);			\
} while (/*CONSTCOND*/0)
#define WM_UNLOCK(_sc)							\
do {									\
	if ((_sc)->sc_txrx_lock)					\
		mutex_exit((_sc)->sc_txrx_lock);			\
} while (/*CONSTCOND*/0)
#define WM_LOCKED(_sc)	(!(_sc)->sc_txrx_lock || mutex_owned((_sc)->sc_txrx_lock))
    396 
/* Callouts are created MPSAFE only when the driver itself is MPSAFE. */
#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif
    402 
/*
 * WM_RXCHAIN_RESET(sc): start a fresh Rx mbuf chain — point the tail
 * pointer back at sc_rxhead, terminate the chain, and zero the length.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/*
 * WM_RXCHAIN_LINK(sc, m): append mbuf "m" to the current Rx chain and
 * advance the tail pointer to m's m_next.
 */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* Event-counter helpers; compile away when WM_EVENT_COUNTERS is off. */
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif
    423 
/* 32-bit access to the chip's memory-mapped CSR space. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Read STATUS to force posted writes out to the device. */
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

/* Accessors for the ICH8 flash register window (NVM access). */
#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

/* DMA address of Tx/Rx descriptor "x" within the control-data clump. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

/* Low/high 32 bits of a descriptor address (high is 0 on 32-bit DMA). */
#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
    453 
/*
 * WM_CDTXSYNC(sc, x, n, ops): bus_dmamap_sync "n" Tx descriptors
 * starting at ring index "x".  The ring is circular, so a range that
 * runs past the end is synced in two pieces: the tail of the ring
 * first, then the remainder from index 0.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* WM_CDRXSYNC(sc, x, ops): bus_dmamap_sync one Rx descriptor. */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
    480 
/*
 * WM_INIT_RXDESC(sc, x): (re)initialize Rx descriptor "x" — point it
 * at its mbuf's (possibly alignment-tweaked) buffer, clear the status
 * fields, sync the descriptor for the device, and advance the
 * hardware Rx tail pointer (RDT) to hand the descriptor back.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
    514 
    515 static void	wm_start(struct ifnet *);
    516 static void	wm_start_locked(struct ifnet *);
    517 static void	wm_nq_start(struct ifnet *);
    518 static void	wm_nq_start_locked(struct ifnet *);
    519 static void	wm_watchdog(struct ifnet *);
    520 static int	wm_ifflags_cb(struct ethercom *);
    521 static int	wm_ioctl(struct ifnet *, u_long, void *);
    522 static int	wm_init(struct ifnet *);
    523 static int	wm_init_locked(struct ifnet *);
    524 static void	wm_stop(struct ifnet *, int);
    525 static void	wm_stop_locked(struct ifnet *, int);
    526 static bool	wm_suspend(device_t, const pmf_qual_t *);
    527 static bool	wm_resume(device_t, const pmf_qual_t *);
    528 
    529 static void	wm_reset(struct wm_softc *);
    530 static void	wm_rxdrain(struct wm_softc *);
    531 static int	wm_add_rxbuf(struct wm_softc *, int);
    532 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
    533 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
    534 static int	wm_validate_eeprom_checksum(struct wm_softc *);
    535 static int	wm_check_alt_mac_addr(struct wm_softc *);
    536 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    537 static void	wm_tick(void *);
    538 
    539 static void	wm_set_filter(struct wm_softc *);
    540 static void	wm_set_vlan(struct wm_softc *);
    541 
    542 static int	wm_intr(void *);
    543 static void	wm_txintr(struct wm_softc *);
    544 static void	wm_rxintr(struct wm_softc *);
    545 static void	wm_linkintr(struct wm_softc *, uint32_t);
    546 
    547 static void	wm_tbi_mediainit(struct wm_softc *);
    548 static int	wm_tbi_mediachange(struct ifnet *);
    549 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    550 
    551 static void	wm_tbi_set_linkled(struct wm_softc *);
    552 static void	wm_tbi_check_link(struct wm_softc *);
    553 
    554 static void	wm_gmii_reset(struct wm_softc *);
    555 
    556 static int	wm_gmii_i82543_readreg(device_t, int, int);
    557 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    558 static int	wm_gmii_i82544_readreg(device_t, int, int);
    559 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    560 static int	wm_gmii_i80003_readreg(device_t, int, int);
    561 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    562 static int	wm_gmii_bm_readreg(device_t, int, int);
    563 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    564 static int	wm_gmii_hv_readreg(device_t, int, int);
    565 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    566 static int	wm_gmii_82580_readreg(device_t, int, int);
    567 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    568 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    569 static int	wm_sgmii_readreg(device_t, int, int);
    570 static void	wm_sgmii_writereg(device_t, int, int, int);
    571 
    572 static void	wm_gmii_statchg(struct ifnet *);
    573 
    574 static int	wm_get_phy_id_82575(struct wm_softc *);
    575 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    576 static int	wm_gmii_mediachange(struct ifnet *);
    577 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    578 
    579 static int	wm_kmrn_readreg(struct wm_softc *, int);
    580 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    581 
    582 static void	wm_set_spiaddrbits(struct wm_softc *);
    583 static int	wm_match(device_t, cfdata_t, void *);
    584 static void	wm_attach(device_t, device_t, void *);
    585 static int	wm_detach(device_t, int);
    586 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
    587 static void	wm_get_auto_rd_done(struct wm_softc *);
    588 static void	wm_lan_init_done(struct wm_softc *);
    589 static void	wm_get_cfg_done(struct wm_softc *);
    590 static int	wm_get_swsm_semaphore(struct wm_softc *);
    591 static void	wm_put_swsm_semaphore(struct wm_softc *);
    592 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    593 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    594 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    595 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    596 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    597 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    598 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    599 
    600 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
    601 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    602 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    603 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
    604 		     uint32_t, uint16_t *);
    605 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    606 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    607 static void	wm_82547_txfifo_stall(void *);
    608 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
    609 static int	wm_check_mng_mode(struct wm_softc *);
    610 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    611 static int	wm_check_mng_mode_82574(struct wm_softc *);
    612 static int	wm_check_mng_mode_generic(struct wm_softc *);
    613 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    614 static int	wm_check_reset_block(struct wm_softc *);
    615 static void	wm_get_hw_control(struct wm_softc *);
    616 static int	wm_check_for_link(struct wm_softc *);
    617 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    618 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    619 #ifdef WM_WOL
    620 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    621 #endif
    622 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    623 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    624 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    625 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    626 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    627 static void	wm_smbustopci(struct wm_softc *);
    628 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    629 static void	wm_reset_init_script_82575(struct wm_softc *);
    630 static void	wm_release_manageability(struct wm_softc *);
    631 static void	wm_release_hw_control(struct wm_softc *);
    632 static void	wm_get_wakeup(struct wm_softc *);
    633 #ifdef WM_WOL
    634 static void	wm_enable_phy_wakeup(struct wm_softc *);
    635 static void	wm_enable_wakeup(struct wm_softc *);
    636 #endif
    637 static void	wm_init_manageability(struct wm_softc *);
    638 static void	wm_set_eee_i350(struct wm_softc *);
    639 
/* Autoconfiguration glue: match/attach/detach entry points for wm(4). */
CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    642 
    643 /*
    644  * Devices supported by this driver.
    645  */
    646 static const struct wm_product {
    647 	pci_vendor_id_t		wmp_vendor;
    648 	pci_product_id_t	wmp_product;
    649 	const char		*wmp_name;
    650 	wm_chip_type		wmp_type;
    651 	int			wmp_flags;
    652 #define	WMP_F_1000X		0x01
    653 #define	WMP_F_1000T		0x02
    654 #define	WMP_F_SERDES		0x04
    655 } wm_products[] = {
    656 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    657 	  "Intel i82542 1000BASE-X Ethernet",
    658 	  WM_T_82542_2_1,	WMP_F_1000X },
    659 
    660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    661 	  "Intel i82543GC 1000BASE-X Ethernet",
    662 	  WM_T_82543,		WMP_F_1000X },
    663 
    664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    665 	  "Intel i82543GC 1000BASE-T Ethernet",
    666 	  WM_T_82543,		WMP_F_1000T },
    667 
    668 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    669 	  "Intel i82544EI 1000BASE-T Ethernet",
    670 	  WM_T_82544,		WMP_F_1000T },
    671 
    672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    673 	  "Intel i82544EI 1000BASE-X Ethernet",
    674 	  WM_T_82544,		WMP_F_1000X },
    675 
    676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    677 	  "Intel i82544GC 1000BASE-T Ethernet",
    678 	  WM_T_82544,		WMP_F_1000T },
    679 
    680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    681 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    682 	  WM_T_82544,		WMP_F_1000T },
    683 
    684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    685 	  "Intel i82540EM 1000BASE-T Ethernet",
    686 	  WM_T_82540,		WMP_F_1000T },
    687 
    688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    689 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    690 	  WM_T_82540,		WMP_F_1000T },
    691 
    692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    693 	  "Intel i82540EP 1000BASE-T Ethernet",
    694 	  WM_T_82540,		WMP_F_1000T },
    695 
    696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    697 	  "Intel i82540EP 1000BASE-T Ethernet",
    698 	  WM_T_82540,		WMP_F_1000T },
    699 
    700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    701 	  "Intel i82540EP 1000BASE-T Ethernet",
    702 	  WM_T_82540,		WMP_F_1000T },
    703 
    704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    705 	  "Intel i82545EM 1000BASE-T Ethernet",
    706 	  WM_T_82545,		WMP_F_1000T },
    707 
    708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    709 	  "Intel i82545GM 1000BASE-T Ethernet",
    710 	  WM_T_82545_3,		WMP_F_1000T },
    711 
    712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    713 	  "Intel i82545GM 1000BASE-X Ethernet",
    714 	  WM_T_82545_3,		WMP_F_1000X },
    715 
    716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    717 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    718 	  WM_T_82545_3,		WMP_F_SERDES },
    719 
    720 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    721 	  "Intel i82546EB 1000BASE-T Ethernet",
    722 	  WM_T_82546,		WMP_F_1000T },
    723 
    724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    725 	  "Intel i82546EB 1000BASE-T Ethernet",
    726 	  WM_T_82546,		WMP_F_1000T },
    727 
    728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    729 	  "Intel i82545EM 1000BASE-X Ethernet",
    730 	  WM_T_82545,		WMP_F_1000X },
    731 
    732 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    733 	  "Intel i82546EB 1000BASE-X Ethernet",
    734 	  WM_T_82546,		WMP_F_1000X },
    735 
    736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    737 	  "Intel i82546GB 1000BASE-T Ethernet",
    738 	  WM_T_82546_3,		WMP_F_1000T },
    739 
    740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    741 	  "Intel i82546GB 1000BASE-X Ethernet",
    742 	  WM_T_82546_3,		WMP_F_1000X },
    743 
    744 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    745 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    746 	  WM_T_82546_3,		WMP_F_SERDES },
    747 
    748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    749 	  "i82546GB quad-port Gigabit Ethernet",
    750 	  WM_T_82546_3,		WMP_F_1000T },
    751 
    752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    753 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    754 	  WM_T_82546_3,		WMP_F_1000T },
    755 
    756 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    757 	  "Intel PRO/1000MT (82546GB)",
    758 	  WM_T_82546_3,		WMP_F_1000T },
    759 
    760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    761 	  "Intel i82541EI 1000BASE-T Ethernet",
    762 	  WM_T_82541,		WMP_F_1000T },
    763 
    764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    765 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    766 	  WM_T_82541,		WMP_F_1000T },
    767 
    768 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    769 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    770 	  WM_T_82541,		WMP_F_1000T },
    771 
    772 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    773 	  "Intel i82541ER 1000BASE-T Ethernet",
    774 	  WM_T_82541_2,		WMP_F_1000T },
    775 
    776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    777 	  "Intel i82541GI 1000BASE-T Ethernet",
    778 	  WM_T_82541_2,		WMP_F_1000T },
    779 
    780 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    781 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    782 	  WM_T_82541_2,		WMP_F_1000T },
    783 
    784 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    785 	  "Intel i82541PI 1000BASE-T Ethernet",
    786 	  WM_T_82541_2,		WMP_F_1000T },
    787 
    788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    789 	  "Intel i82547EI 1000BASE-T Ethernet",
    790 	  WM_T_82547,		WMP_F_1000T },
    791 
    792 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    793 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    794 	  WM_T_82547,		WMP_F_1000T },
    795 
    796 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    797 	  "Intel i82547GI 1000BASE-T Ethernet",
    798 	  WM_T_82547_2,		WMP_F_1000T },
    799 
    800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    801 	  "Intel PRO/1000 PT (82571EB)",
    802 	  WM_T_82571,		WMP_F_1000T },
    803 
    804 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    805 	  "Intel PRO/1000 PF (82571EB)",
    806 	  WM_T_82571,		WMP_F_1000X },
    807 
    808 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    809 	  "Intel PRO/1000 PB (82571EB)",
    810 	  WM_T_82571,		WMP_F_SERDES },
    811 
    812 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    813 	  "Intel PRO/1000 QT (82571EB)",
    814 	  WM_T_82571,		WMP_F_1000T },
    815 
    816 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    817 	  "Intel i82572EI 1000baseT Ethernet",
    818 	  WM_T_82572,		WMP_F_1000T },
    819 
    820 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    821 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    822 	  WM_T_82571,		WMP_F_1000T, },
    823 
    824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    825 	  "Intel i82572EI 1000baseX Ethernet",
    826 	  WM_T_82572,		WMP_F_1000X },
    827 
    828 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    829 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    830 	  WM_T_82572,		WMP_F_SERDES },
    831 
    832 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    833 	  "Intel i82572EI 1000baseT Ethernet",
    834 	  WM_T_82572,		WMP_F_1000T },
    835 
    836 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    837 	  "Intel i82573E",
    838 	  WM_T_82573,		WMP_F_1000T },
    839 
    840 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    841 	  "Intel i82573E IAMT",
    842 	  WM_T_82573,		WMP_F_1000T },
    843 
    844 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
    845 	  "Intel i82573L Gigabit Ethernet",
    846 	  WM_T_82573,		WMP_F_1000T },
    847 
    848 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
    849 	  "Intel i82574L",
    850 	  WM_T_82574,		WMP_F_1000T },
    851 
    852 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
    853 	  "Intel i82583V",
    854 	  WM_T_82583,		WMP_F_1000T },
    855 
    856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
    857 	  "i80003 dual 1000baseT Ethernet",
    858 	  WM_T_80003,		WMP_F_1000T },
    859 
    860 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
    861 	  "i80003 dual 1000baseX Ethernet",
    862 	  WM_T_80003,		WMP_F_1000T },
    863 
    864 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
    865 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
    866 	  WM_T_80003,		WMP_F_SERDES },
    867 
    868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
    869 	  "Intel i80003 1000baseT Ethernet",
    870 	  WM_T_80003,		WMP_F_1000T },
    871 
    872 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
    873 	  "Intel i80003 Gigabit Ethernet (SERDES)",
    874 	  WM_T_80003,		WMP_F_SERDES },
    875 
    876 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
    877 	  "Intel i82801H (M_AMT) LAN Controller",
    878 	  WM_T_ICH8,		WMP_F_1000T },
    879 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
    880 	  "Intel i82801H (AMT) LAN Controller",
    881 	  WM_T_ICH8,		WMP_F_1000T },
    882 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
    883 	  "Intel i82801H LAN Controller",
    884 	  WM_T_ICH8,		WMP_F_1000T },
    885 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
    886 	  "Intel i82801H (IFE) LAN Controller",
    887 	  WM_T_ICH8,		WMP_F_1000T },
    888 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
    889 	  "Intel i82801H (M) LAN Controller",
    890 	  WM_T_ICH8,		WMP_F_1000T },
    891 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
    892 	  "Intel i82801H IFE (GT) LAN Controller",
    893 	  WM_T_ICH8,		WMP_F_1000T },
    894 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
    895 	  "Intel i82801H IFE (G) LAN Controller",
    896 	  WM_T_ICH8,		WMP_F_1000T },
    897 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
    898 	  "82801I (AMT) LAN Controller",
    899 	  WM_T_ICH9,		WMP_F_1000T },
    900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
    901 	  "82801I LAN Controller",
    902 	  WM_T_ICH9,		WMP_F_1000T },
    903 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
    904 	  "82801I (G) LAN Controller",
    905 	  WM_T_ICH9,		WMP_F_1000T },
    906 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
    907 	  "82801I (GT) LAN Controller",
    908 	  WM_T_ICH9,		WMP_F_1000T },
    909 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
    910 	  "82801I (C) LAN Controller",
    911 	  WM_T_ICH9,		WMP_F_1000T },
    912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
    913 	  "82801I mobile LAN Controller",
    914 	  WM_T_ICH9,		WMP_F_1000T },
    915 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
    916 	  "82801I mobile (V) LAN Controller",
    917 	  WM_T_ICH9,		WMP_F_1000T },
    918 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
    919 	  "82801I mobile (AMT) LAN Controller",
    920 	  WM_T_ICH9,		WMP_F_1000T },
    921 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
    922 	  "82567LM-4 LAN Controller",
    923 	  WM_T_ICH9,		WMP_F_1000T },
    924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
    925 	  "82567V-3 LAN Controller",
    926 	  WM_T_ICH9,		WMP_F_1000T },
    927 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
    928 	  "82567LM-2 LAN Controller",
    929 	  WM_T_ICH10,		WMP_F_1000T },
    930 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
    931 	  "82567LF-2 LAN Controller",
    932 	  WM_T_ICH10,		WMP_F_1000T },
    933 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
    934 	  "82567LM-3 LAN Controller",
    935 	  WM_T_ICH10,		WMP_F_1000T },
    936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
    937 	  "82567LF-3 LAN Controller",
    938 	  WM_T_ICH10,		WMP_F_1000T },
    939 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
    940 	  "82567V-2 LAN Controller",
    941 	  WM_T_ICH10,		WMP_F_1000T },
    942 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
    943 	  "82567V-3? LAN Controller",
    944 	  WM_T_ICH10,		WMP_F_1000T },
    945 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
    946 	  "HANKSVILLE LAN Controller",
    947 	  WM_T_ICH10,		WMP_F_1000T },
    948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
    949 	  "PCH LAN (82577LM) Controller",
    950 	  WM_T_PCH,		WMP_F_1000T },
    951 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
    952 	  "PCH LAN (82577LC) Controller",
    953 	  WM_T_PCH,		WMP_F_1000T },
    954 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
    955 	  "PCH LAN (82578DM) Controller",
    956 	  WM_T_PCH,		WMP_F_1000T },
    957 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
    958 	  "PCH LAN (82578DC) Controller",
    959 	  WM_T_PCH,		WMP_F_1000T },
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
    961 	  "PCH2 LAN (82579LM) Controller",
    962 	  WM_T_PCH2,		WMP_F_1000T },
    963 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
    964 	  "PCH2 LAN (82579V) Controller",
    965 	  WM_T_PCH2,		WMP_F_1000T },
    966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
    967 	  "82575EB dual-1000baseT Ethernet",
    968 	  WM_T_82575,		WMP_F_1000T },
    969 #if 0
    970 	/*
    971 	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
    972 	 * disabled for now ...
    973 	 */
    974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
    975 	  "82575EB dual-1000baseX Ethernet (SERDES)",
    976 	  WM_T_82575,		WMP_F_SERDES },
    977 #endif
    978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
    979 	  "82575GB quad-1000baseT Ethernet",
    980 	  WM_T_82575,		WMP_F_1000T },
    981 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
    982 	  "82575GB quad-1000baseT Ethernet (PM)",
    983 	  WM_T_82575,		WMP_F_1000T },
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
    985 	  "82576 1000BaseT Ethernet",
    986 	  WM_T_82576,		WMP_F_1000T },
    987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
    988 	  "82576 1000BaseX Ethernet",
    989 	  WM_T_82576,		WMP_F_1000X },
    990 
    991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
    992 	  "82576 gigabit Ethernet (SERDES)",
    993 	  WM_T_82576,		WMP_F_SERDES },
    994 
    995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
    996 	  "82576 quad-1000BaseT Ethernet",
    997 	  WM_T_82576,		WMP_F_1000T },
    998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
    999 	  "82576 gigabit Ethernet",
   1000 	  WM_T_82576,		WMP_F_1000T },
   1001 
   1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1003 	  "82576 gigabit Ethernet (SERDES)",
   1004 	  WM_T_82576,		WMP_F_SERDES },
   1005 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1006 	  "82576 quad-gigabit Ethernet (SERDES)",
   1007 	  WM_T_82576,		WMP_F_SERDES },
   1008 
   1009 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1010 	  "82580 1000BaseT Ethernet",
   1011 	  WM_T_82580,		WMP_F_1000T },
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1013 	  "82580 1000BaseX Ethernet",
   1014 	  WM_T_82580,		WMP_F_1000X },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1017 	  "82580 1000BaseT Ethernet (SERDES)",
   1018 	  WM_T_82580,		WMP_F_SERDES },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1021 	  "82580 gigabit Ethernet (SGMII)",
   1022 	  WM_T_82580,		WMP_F_1000T },
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1024 	  "82580 dual-1000BaseT Ethernet",
   1025 	  WM_T_82580,		WMP_F_1000T },
   1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
   1027 	  "82580 1000BaseT Ethernet",
   1028 	  WM_T_82580ER,		WMP_F_1000T },
   1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
   1030 	  "82580 dual-1000BaseT Ethernet",
   1031 	  WM_T_82580ER,		WMP_F_1000T },
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1033 	  "82580 quad-1000BaseX Ethernet",
   1034 	  WM_T_82580,		WMP_F_1000X },
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1036 	  "I350 Gigabit Network Connection",
   1037 	  WM_T_I350,		WMP_F_1000T },
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1039 	  "I350 Gigabit Fiber Network Connection",
   1040 	  WM_T_I350,		WMP_F_1000X },
   1041 
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1043 	  "I350 Gigabit Backplane Connection",
   1044 	  WM_T_I350,		WMP_F_SERDES },
   1045 #if 0
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1047 	  "I350 Gigabit Connection",
   1048 	  WM_T_I350,		WMP_F_1000T },
   1049 #endif
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1051 	  "I354 Gigabit Connection",
   1052 	  WM_T_I354,		WMP_F_1000T },
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1054 	  "I210-T1 Ethernet Server Adapter",
   1055 	  WM_T_I210,		WMP_F_1000T },
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1057 	  "I210 Ethernet (Copper OEM)",
   1058 	  WM_T_I210,		WMP_F_1000T },
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1060 	  "I210 Ethernet (Copper IT)",
   1061 	  WM_T_I210,		WMP_F_1000T },
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1063 	  "I210 Gigabit Ethernet (Fiber)",
   1064 	  WM_T_I210,		WMP_F_1000X },
   1065 
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1067 	  "I210 Gigabit Ethernet (SERDES)",
   1068 	  WM_T_I210,		WMP_F_SERDES },
   1069 #if 0
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1071 	  "I210 Gigabit Ethernet (SGMII)",
   1072 	  WM_T_I210,		WMP_F_SERDES },
   1073 #endif
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1075 	  "I211 Ethernet (COPPER)",
   1076 	  WM_T_I211,		WMP_F_1000T },
   1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1078 	  "I217 V Ethernet Connection",
   1079 	  WM_T_PCH_LPT,		WMP_F_1000T },
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1081 	  "I217 LM Ethernet Connection",
   1082 	  WM_T_PCH_LPT,		WMP_F_1000T },
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1084 	  "I218 V Ethernet Connection",
   1085 	  WM_T_PCH_LPT,		WMP_F_1000T },
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1087 	  "I218 LM Ethernet Connection",
   1088 	  WM_T_PCH_LPT,		WMP_F_1000T },
   1089 	{ 0,			0,
   1090 	  NULL,
   1091 	  0,			0 },
   1092 };
   1093 
   1094 #ifdef WM_EVENT_COUNTERS
   1095 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1096 #endif /* WM_EVENT_COUNTERS */
   1097 
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a device register through the I/O-mapped indirect access
 *	window: the register offset is written to I/O offset 0, then the
 *	register value is read back from I/O offset 4.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
   1107 
/*
 * wm_io_write:
 *
 *	Write a device register through the I/O-mapped indirect access
 *	window: the register offset is written to I/O offset 0, then the
 *	value to I/O offset 4.
 */
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
   1115 
   1116 static inline void
   1117 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1118     uint32_t data)
   1119 {
   1120 	uint32_t regval;
   1121 	int i;
   1122 
   1123 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1124 
   1125 	CSR_WRITE(sc, reg, regval);
   1126 
   1127 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1128 		delay(5);
   1129 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1130 			break;
   1131 	}
   1132 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1133 		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
   1134 		    device_xname(sc->sc_dev), reg);
   1135 	}
   1136 }
   1137 
   1138 static inline void
   1139 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1140 {
   1141 	wa->wa_low = htole32(v & 0xffffffffU);
   1142 	if (sizeof(bus_addr_t) == 8)
   1143 		wa->wa_high = htole32((uint64_t) v >> 32);
   1144 	else
   1145 		wa->wa_high = 0;
   1146 }
   1147 
   1148 static void
   1149 wm_set_spiaddrbits(struct wm_softc *sc)
   1150 {
   1151 	uint32_t reg;
   1152 
   1153 	sc->sc_flags |= WM_F_EEPROM_SPI;
   1154 	reg = CSR_READ(sc, WMREG_EECD);
   1155 	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   1156 }
   1157 
   1158 static const struct wm_product *
   1159 wm_lookup(const struct pci_attach_args *pa)
   1160 {
   1161 	const struct wm_product *wmp;
   1162 
   1163 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1164 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1165 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1166 			return wmp;
   1167 	}
   1168 	return NULL;
   1169 }
   1170 
   1171 static int
   1172 wm_match(device_t parent, cfdata_t cf, void *aux)
   1173 {
   1174 	struct pci_attach_args *pa = aux;
   1175 
   1176 	if (wm_lookup(pa) != NULL)
   1177 		return 1;
   1178 
   1179 	return 0;
   1180 }
   1181 
   1182 static void
   1183 wm_attach(device_t parent, device_t self, void *aux)
   1184 {
   1185 	struct wm_softc *sc = device_private(self);
   1186 	struct pci_attach_args *pa = aux;
   1187 	prop_dictionary_t dict;
   1188 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1189 	pci_chipset_tag_t pc = pa->pa_pc;
   1190 	pci_intr_handle_t ih;
   1191 	const char *intrstr = NULL;
   1192 	const char *eetype, *xname;
   1193 	bus_space_tag_t memt;
   1194 	bus_space_handle_t memh;
   1195 	bus_size_t memsize;
   1196 	int memh_valid;
   1197 	int i, error;
   1198 	const struct wm_product *wmp;
   1199 	prop_data_t ea;
   1200 	prop_number_t pn;
   1201 	uint8_t enaddr[ETHER_ADDR_LEN];
   1202 	uint16_t cfg1, cfg2, swdpin, io3;
   1203 	pcireg_t preg, memtype;
   1204 	uint16_t eeprom_data, apme_mask;
   1205 	bool force_clear_smbi;
   1206 	uint32_t reg;
   1207 	char intrbuf[PCI_INTRSTR_LEN];
   1208 
   1209 	sc->sc_dev = self;
   1210 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1211 	sc->sc_stopping = false;
   1212 
   1213 	sc->sc_wmp = wmp = wm_lookup(pa);
   1214 	if (wmp == NULL) {
   1215 		printf("\n");
   1216 		panic("wm_attach: impossible");
   1217 	}
   1218 
   1219 	sc->sc_pc = pa->pa_pc;
   1220 	sc->sc_pcitag = pa->pa_tag;
   1221 
   1222 	if (pci_dma64_available(pa))
   1223 		sc->sc_dmat = pa->pa_dmat64;
   1224 	else
   1225 		sc->sc_dmat = pa->pa_dmat;
   1226 
   1227 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1228 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1229 
   1230 	sc->sc_type = wmp->wmp_type;
   1231 	if (sc->sc_type < WM_T_82543) {
   1232 		if (sc->sc_rev < 2) {
   1233 			aprint_error_dev(sc->sc_dev,
   1234 			    "i82542 must be at least rev. 2\n");
   1235 			return;
   1236 		}
   1237 		if (sc->sc_rev < 3)
   1238 			sc->sc_type = WM_T_82542_2_0;
   1239 	}
   1240 
   1241 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1242 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   1243 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1244 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1245 		sc->sc_flags |= WM_F_NEWQUEUE;
   1246 
   1247 	/* Set device properties (mactype) */
   1248 	dict = device_properties(sc->sc_dev);
   1249 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1250 
   1251 	/*
	 * Map the device.  All devices support memory-mapped access,
   1253 	 * and it is really required for normal operation.
   1254 	 */
   1255 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1256 	switch (memtype) {
   1257 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1258 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1259 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1260 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1261 		break;
   1262 	default:
   1263 		memh_valid = 0;
   1264 		break;
   1265 	}
   1266 
   1267 	if (memh_valid) {
   1268 		sc->sc_st = memt;
   1269 		sc->sc_sh = memh;
   1270 		sc->sc_ss = memsize;
   1271 	} else {
   1272 		aprint_error_dev(sc->sc_dev,
   1273 		    "unable to map device registers\n");
   1274 		return;
   1275 	}
   1276 
   1277 	/*
   1278 	 * In addition, i82544 and later support I/O mapped indirect
   1279 	 * register access.  It is not desirable (nor supported in
   1280 	 * this driver) to use it for normal operation, though it is
   1281 	 * required to work around bugs in some chip versions.
   1282 	 */
   1283 	if (sc->sc_type >= WM_T_82544) {
   1284 		/* First we have to find the I/O BAR. */
   1285 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1286 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1287 			if (memtype == PCI_MAPREG_TYPE_IO)
   1288 				break;
   1289 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1290 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1291 				i += 4;	/* skip high bits, too */
   1292 		}
   1293 		if (i < PCI_MAPREG_END) {
   1294 			/*
   1295 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
   1296 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
			 * It's not a problem because newer chips don't have
			 * this bug.
   1299 			 *
   1300 			 * The i8254x doesn't apparently respond when the
   1301 			 * I/O BAR is 0, which looks somewhat like it's not
   1302 			 * been configured.
   1303 			 */
   1304 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1305 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1306 				aprint_error_dev(sc->sc_dev,
   1307 				    "WARNING: I/O BAR at zero.\n");
   1308 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1309 					0, &sc->sc_iot, &sc->sc_ioh,
   1310 					NULL, &sc->sc_ios) == 0) {
   1311 				sc->sc_flags |= WM_F_IOH_VALID;
   1312 			} else {
   1313 				aprint_error_dev(sc->sc_dev,
   1314 				    "WARNING: unable to map I/O space\n");
   1315 			}
   1316 		}
   1317 
   1318 	}
   1319 
   1320 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1321 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1322 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1323 	if (sc->sc_type < WM_T_82542_2_1)
   1324 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1325 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1326 
   1327 	/* power up chip */
   1328 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1329 	    NULL)) && error != EOPNOTSUPP) {
   1330 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1331 		return;
   1332 	}
   1333 
   1334 	/*
   1335 	 * Map and establish our interrupt.
   1336 	 */
   1337 	if (pci_intr_map(pa, &ih)) {
   1338 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
   1339 		return;
   1340 	}
   1341 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
   1342 #ifdef WM_MPSAFE
   1343 	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
   1344 #endif
   1345 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
   1346 	if (sc->sc_ih == NULL) {
   1347 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
   1348 		if (intrstr != NULL)
   1349 			aprint_error(" at %s", intrstr);
   1350 		aprint_error("\n");
   1351 		return;
   1352 	}
   1353 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   1354 
   1355 	/*
   1356 	 * Check the function ID (unit number of the chip).
   1357 	 */
   1358 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1359 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1360 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1361 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   1362 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1363 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1364 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1365 	else
   1366 		sc->sc_funcid = 0;
   1367 
   1368 	/*
   1369 	 * Determine a few things about the bus we're connected to.
   1370 	 */
   1371 	if (sc->sc_type < WM_T_82543) {
   1372 		/* We don't really know the bus characteristics here. */
   1373 		sc->sc_bus_speed = 33;
   1374 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1375 		/*
   1376 		 * CSA (Communication Streaming Architecture) is about as fast
		 * as a 32-bit 66MHz PCI bus.
   1378 		 */
   1379 		sc->sc_flags |= WM_F_CSA;
   1380 		sc->sc_bus_speed = 66;
   1381 		aprint_verbose_dev(sc->sc_dev,
   1382 		    "Communication Streaming Architecture\n");
   1383 		if (sc->sc_type == WM_T_82547) {
   1384 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1385 			callout_setfunc(&sc->sc_txfifo_ch,
   1386 					wm_82547_txfifo_stall, sc);
   1387 			aprint_verbose_dev(sc->sc_dev,
   1388 			    "using 82547 Tx FIFO stall work-around\n");
   1389 		}
   1390 	} else if (sc->sc_type >= WM_T_82571) {
   1391 		sc->sc_flags |= WM_F_PCIE;
   1392 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1393 		    && (sc->sc_type != WM_T_ICH10)
   1394 		    && (sc->sc_type != WM_T_PCH)
   1395 		    && (sc->sc_type != WM_T_PCH2)
   1396 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1397 			/* ICH* and PCH* have no PCIe capability registers */
   1398 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1399 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1400 				NULL) == 0)
   1401 				aprint_error_dev(sc->sc_dev,
   1402 				    "unable to find PCIe capability\n");
   1403 		}
   1404 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1405 	} else {
   1406 		reg = CSR_READ(sc, WMREG_STATUS);
   1407 		if (reg & STATUS_BUS64)
   1408 			sc->sc_flags |= WM_F_BUS64;
   1409 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1410 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1411 
   1412 			sc->sc_flags |= WM_F_PCIX;
   1413 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1414 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1415 				aprint_error_dev(sc->sc_dev,
   1416 				    "unable to find PCIX capability\n");
   1417 			else if (sc->sc_type != WM_T_82545_3 &&
   1418 				 sc->sc_type != WM_T_82546_3) {
   1419 				/*
   1420 				 * Work around a problem caused by the BIOS
   1421 				 * setting the max memory read byte count
   1422 				 * incorrectly.
   1423 				 */
   1424 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1425 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1426 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1427 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1428 
   1429 				bytecnt =
   1430 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1431 				    PCIX_CMD_BYTECNT_SHIFT;
   1432 				maxb =
   1433 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1434 				    PCIX_STATUS_MAXB_SHIFT;
   1435 				if (bytecnt > maxb) {
   1436 					aprint_verbose_dev(sc->sc_dev,
   1437 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1438 					    512 << bytecnt, 512 << maxb);
   1439 					pcix_cmd = (pcix_cmd &
   1440 					    ~PCIX_CMD_BYTECNT_MASK) |
   1441 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1442 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1443 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1444 					    pcix_cmd);
   1445 				}
   1446 			}
   1447 		}
   1448 		/*
   1449 		 * The quad port adapter is special; it has a PCIX-PCIX
   1450 		 * bridge on the board, and can run the secondary bus at
   1451 		 * a higher speed.
   1452 		 */
   1453 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1454 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1455 								      : 66;
   1456 		} else if (sc->sc_flags & WM_F_PCIX) {
   1457 			switch (reg & STATUS_PCIXSPD_MASK) {
   1458 			case STATUS_PCIXSPD_50_66:
   1459 				sc->sc_bus_speed = 66;
   1460 				break;
   1461 			case STATUS_PCIXSPD_66_100:
   1462 				sc->sc_bus_speed = 100;
   1463 				break;
   1464 			case STATUS_PCIXSPD_100_133:
   1465 				sc->sc_bus_speed = 133;
   1466 				break;
   1467 			default:
   1468 				aprint_error_dev(sc->sc_dev,
   1469 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1470 				    reg & STATUS_PCIXSPD_MASK);
   1471 				sc->sc_bus_speed = 66;
   1472 				break;
   1473 			}
   1474 		} else
   1475 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1476 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1477 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1478 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1479 	}
   1480 
   1481 	/*
   1482 	 * Allocate the control data structures, and create and load the
   1483 	 * DMA map for it.
   1484 	 *
   1485 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   1486 	 * memory.  So must Rx descriptors.  We simplify by allocating
   1487 	 * both sets within the same 4G segment.
   1488 	 */
   1489 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
   1490 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
   1491 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
   1492 	    sizeof(struct wm_control_data_82542) :
   1493 	    sizeof(struct wm_control_data_82544);
   1494 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
   1495 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
   1496 		    &sc->sc_cd_rseg, 0)) != 0) {
   1497 		aprint_error_dev(sc->sc_dev,
   1498 		    "unable to allocate control data, error = %d\n",
   1499 		    error);
   1500 		goto fail_0;
   1501 	}
   1502 
   1503 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
   1504 		    sc->sc_cd_rseg, sc->sc_cd_size,
   1505 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
   1506 		aprint_error_dev(sc->sc_dev,
   1507 		    "unable to map control data, error = %d\n", error);
   1508 		goto fail_1;
   1509 	}
   1510 
   1511 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
   1512 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
   1513 		aprint_error_dev(sc->sc_dev,
   1514 		    "unable to create control data DMA map, error = %d\n",
   1515 		    error);
   1516 		goto fail_2;
   1517 	}
   1518 
   1519 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
   1520 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
   1521 		aprint_error_dev(sc->sc_dev,
   1522 		    "unable to load control data DMA map, error = %d\n",
   1523 		    error);
   1524 		goto fail_3;
   1525 	}
   1526 
   1527 	/*
   1528 	 * Create the transmit buffer DMA maps.
   1529 	 */
   1530 	WM_TXQUEUELEN(sc) =
   1531 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   1532 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   1533 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1534 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   1535 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   1536 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
   1537 			aprint_error_dev(sc->sc_dev,
   1538 			    "unable to create Tx DMA map %d, error = %d\n",
   1539 			    i, error);
   1540 			goto fail_4;
   1541 		}
   1542 	}
   1543 
   1544 	/*
   1545 	 * Create the receive buffer DMA maps.
   1546 	 */
   1547 	for (i = 0; i < WM_NRXDESC; i++) {
   1548 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   1549 			    MCLBYTES, 0, 0,
   1550 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
   1551 			aprint_error_dev(sc->sc_dev,
   1552 			    "unable to create Rx DMA map %d error = %d\n",
   1553 			    i, error);
   1554 			goto fail_5;
   1555 		}
   1556 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
   1557 	}
   1558 
   1559 	/* clear interesting stat counters */
   1560 	CSR_READ(sc, WMREG_COLC);
   1561 	CSR_READ(sc, WMREG_RXERRC);
   1562 
   1563 	/* get PHY control from SMBus to PCIe */
   1564 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1565 	    || (sc->sc_type == WM_T_PCH_LPT))
   1566 		wm_smbustopci(sc);
   1567 
   1568 	/*
   1569 	 * Reset the chip to a known state.
   1570 	 */
   1571 	wm_reset(sc);
   1572 
   1573 	/*
   1574 	 * Get some information about the EEPROM.
   1575 	 */
   1576 	switch (sc->sc_type) {
   1577 	case WM_T_82542_2_0:
   1578 	case WM_T_82542_2_1:
   1579 	case WM_T_82543:
   1580 	case WM_T_82544:
   1581 		/* Microwire */
   1582 		sc->sc_ee_addrbits = 6;
   1583 		break;
   1584 	case WM_T_82540:
   1585 	case WM_T_82545:
   1586 	case WM_T_82545_3:
   1587 	case WM_T_82546:
   1588 	case WM_T_82546_3:
   1589 		/* Microwire */
   1590 		reg = CSR_READ(sc, WMREG_EECD);
   1591 		if (reg & EECD_EE_SIZE)
   1592 			sc->sc_ee_addrbits = 8;
   1593 		else
   1594 			sc->sc_ee_addrbits = 6;
   1595 		sc->sc_flags |= WM_F_LOCK_EECD;
   1596 		break;
   1597 	case WM_T_82541:
   1598 	case WM_T_82541_2:
   1599 	case WM_T_82547:
   1600 	case WM_T_82547_2:
   1601 		reg = CSR_READ(sc, WMREG_EECD);
   1602 		if (reg & EECD_EE_TYPE) {
   1603 			/* SPI */
   1604 			wm_set_spiaddrbits(sc);
   1605 		} else
   1606 			/* Microwire */
   1607 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
   1608 		sc->sc_flags |= WM_F_LOCK_EECD;
   1609 		break;
   1610 	case WM_T_82571:
   1611 	case WM_T_82572:
   1612 		/* SPI */
   1613 		wm_set_spiaddrbits(sc);
   1614 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1615 		break;
   1616 	case WM_T_82573:
   1617 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1618 		/* FALLTHROUGH */
   1619 	case WM_T_82574:
   1620 	case WM_T_82583:
   1621 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
   1622 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1623 		else {
   1624 			/* SPI */
   1625 			wm_set_spiaddrbits(sc);
   1626 		}
   1627 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1628 		break;
   1629 	case WM_T_82575:
   1630 	case WM_T_82576:
   1631 	case WM_T_82580:
   1632 	case WM_T_82580ER:
   1633 	case WM_T_I350:
   1634 	case WM_T_I354:
   1635 	case WM_T_80003:
   1636 		/* SPI */
   1637 		wm_set_spiaddrbits(sc);
   1638 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1639 		    | WM_F_LOCK_SWSM;
   1640 		break;
   1641 	case WM_T_ICH8:
   1642 	case WM_T_ICH9:
   1643 	case WM_T_ICH10:
   1644 	case WM_T_PCH:
   1645 	case WM_T_PCH2:
   1646 	case WM_T_PCH_LPT:
   1647 		/* FLASH */
   1648 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1649 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1650 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1651 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
   1652 			aprint_error_dev(sc->sc_dev,
   1653 			    "can't map FLASH registers\n");
   1654 			return;
   1655 		}
   1656 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1657 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1658 						ICH_FLASH_SECTOR_SIZE;
   1659 		sc->sc_ich8_flash_bank_size =
   1660 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1661 		sc->sc_ich8_flash_bank_size -=
   1662 		    (reg & ICH_GFPREG_BASE_MASK);
   1663 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1664 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1665 		break;
   1666 	case WM_T_I210:
   1667 	case WM_T_I211:
   1668 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1669 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1670 		break;
   1671 	default:
   1672 		break;
   1673 	}
   1674 
   1675 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1676 	switch (sc->sc_type) {
   1677 	case WM_T_82571:
   1678 	case WM_T_82572:
   1679 		reg = CSR_READ(sc, WMREG_SWSM2);
   1680 		if ((reg & SWSM2_LOCK) != 0) {
   1681 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1682 			force_clear_smbi = true;
   1683 		} else
   1684 			force_clear_smbi = false;
   1685 		break;
   1686 	default:
   1687 		force_clear_smbi = true;
   1688 		break;
   1689 	}
   1690 	if (force_clear_smbi) {
   1691 		reg = CSR_READ(sc, WMREG_SWSM);
   1692 		if ((reg & ~SWSM_SMBI) != 0)
   1693 			aprint_error_dev(sc->sc_dev,
   1694 			    "Please update the Bootagent\n");
   1695 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1696 	}
   1697 
   1698 	/*
   1699 	 * Defer printing the EEPROM type until after verifying the checksum
   1700 	 * This allows the EEPROM type to be printed correctly in the case
   1701 	 * that no EEPROM is attached.
   1702 	 */
   1703 	/*
   1704 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1705 	 * this for later, so we can fail future reads from the EEPROM.
   1706 	 */
   1707 	if (wm_validate_eeprom_checksum(sc)) {
   1708 		/*
   1709 		 * Read twice again because some PCI-e parts fail the
   1710 		 * first check due to the link being in sleep state.
   1711 		 */
   1712 		if (wm_validate_eeprom_checksum(sc))
   1713 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1714 	}
   1715 
   1716 	/* Set device properties (macflags) */
   1717 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1718 
   1719 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1720 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
   1721 	else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
   1722 		aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
   1723 	} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   1724 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
   1725 	} else {
   1726 		if (sc->sc_flags & WM_F_EEPROM_SPI)
   1727 			eetype = "SPI";
   1728 		else
   1729 			eetype = "MicroWire";
   1730 		aprint_verbose_dev(sc->sc_dev,
   1731 		    "%u word (%d address bits) %s EEPROM\n",
   1732 		    1U << sc->sc_ee_addrbits,
   1733 		    sc->sc_ee_addrbits, eetype);
   1734 	}
   1735 
   1736 	switch (sc->sc_type) {
   1737 	case WM_T_82571:
   1738 	case WM_T_82572:
   1739 	case WM_T_82573:
   1740 	case WM_T_82574:
   1741 	case WM_T_82583:
   1742 	case WM_T_80003:
   1743 	case WM_T_ICH8:
   1744 	case WM_T_ICH9:
   1745 	case WM_T_ICH10:
   1746 	case WM_T_PCH:
   1747 	case WM_T_PCH2:
   1748 	case WM_T_PCH_LPT:
   1749 		if (wm_check_mng_mode(sc) != 0)
   1750 			wm_get_hw_control(sc);
   1751 		break;
   1752 	default:
   1753 		break;
   1754 	}
   1755 	wm_get_wakeup(sc);
   1756 	/*
   1757 	 * Read the Ethernet address from the EEPROM, if not first found
   1758 	 * in device properties.
   1759 	 */
   1760 	ea = prop_dictionary_get(dict, "mac-address");
   1761 	if (ea != NULL) {
   1762 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   1763 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   1764 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   1765 	} else {
   1766 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   1767 			aprint_error_dev(sc->sc_dev,
   1768 			    "unable to read Ethernet address\n");
   1769 			return;
   1770 		}
   1771 	}
   1772 
   1773 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   1774 	    ether_sprintf(enaddr));
   1775 
   1776 	/*
   1777 	 * Read the config info from the EEPROM, and set up various
   1778 	 * bits in the control registers based on their contents.
   1779 	 */
   1780 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   1781 	if (pn != NULL) {
   1782 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1783 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   1784 	} else {
   1785 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
   1786 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   1787 			return;
   1788 		}
   1789 	}
   1790 
   1791 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   1792 	if (pn != NULL) {
   1793 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1794 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   1795 	} else {
   1796 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
   1797 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   1798 			return;
   1799 		}
   1800 	}
   1801 
   1802 	/* check for WM_F_WOL */
   1803 	switch (sc->sc_type) {
   1804 	case WM_T_82542_2_0:
   1805 	case WM_T_82542_2_1:
   1806 	case WM_T_82543:
   1807 		/* dummy? */
   1808 		eeprom_data = 0;
   1809 		apme_mask = EEPROM_CFG3_APME;
   1810 		break;
   1811 	case WM_T_82544:
   1812 		apme_mask = EEPROM_CFG2_82544_APM_EN;
   1813 		eeprom_data = cfg2;
   1814 		break;
   1815 	case WM_T_82546:
   1816 	case WM_T_82546_3:
   1817 	case WM_T_82571:
   1818 	case WM_T_82572:
   1819 	case WM_T_82573:
   1820 	case WM_T_82574:
   1821 	case WM_T_82583:
   1822 	case WM_T_80003:
   1823 	default:
   1824 		apme_mask = EEPROM_CFG3_APME;
   1825 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
   1826 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
   1827 		break;
   1828 	case WM_T_82575:
   1829 	case WM_T_82576:
   1830 	case WM_T_82580:
   1831 	case WM_T_82580ER:
   1832 	case WM_T_I350:
   1833 	case WM_T_I354: /* XXX ok? */
   1834 	case WM_T_ICH8:
   1835 	case WM_T_ICH9:
   1836 	case WM_T_ICH10:
   1837 	case WM_T_PCH:
   1838 	case WM_T_PCH2:
   1839 	case WM_T_PCH_LPT:
   1840 		/* XXX The funcid should be checked on some devices */
   1841 		apme_mask = WUC_APME;
   1842 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   1843 		break;
   1844 	}
   1845 
   1846 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   1847 	if ((eeprom_data & apme_mask) != 0)
   1848 		sc->sc_flags |= WM_F_WOL;
   1849 #ifdef WM_DEBUG
   1850 	if ((sc->sc_flags & WM_F_WOL) != 0)
   1851 		printf("WOL\n");
   1852 #endif
   1853 
   1854 	/*
   1855 	 * XXX need special handling for some multiple port cards
   1856 	 * to disable a paticular port.
   1857 	 */
   1858 
   1859 	if (sc->sc_type >= WM_T_82544) {
   1860 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   1861 		if (pn != NULL) {
   1862 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1863 			swdpin = (uint16_t) prop_number_integer_value(pn);
   1864 		} else {
   1865 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
   1866 				aprint_error_dev(sc->sc_dev,
   1867 				    "unable to read SWDPIN\n");
   1868 				return;
   1869 			}
   1870 		}
   1871 	}
   1872 
   1873 	if (cfg1 & EEPROM_CFG1_ILOS)
   1874 		sc->sc_ctrl |= CTRL_ILOS;
   1875 	if (sc->sc_type >= WM_T_82544) {
   1876 		sc->sc_ctrl |=
   1877 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   1878 		    CTRL_SWDPIO_SHIFT;
   1879 		sc->sc_ctrl |=
   1880 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   1881 		    CTRL_SWDPINS_SHIFT;
   1882 	} else {
   1883 		sc->sc_ctrl |=
   1884 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   1885 		    CTRL_SWDPIO_SHIFT;
   1886 	}
   1887 
   1888 #if 0
   1889 	if (sc->sc_type >= WM_T_82544) {
   1890 		if (cfg1 & EEPROM_CFG1_IPS0)
   1891 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   1892 		if (cfg1 & EEPROM_CFG1_IPS1)
   1893 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   1894 		sc->sc_ctrl_ext |=
   1895 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   1896 		    CTRL_EXT_SWDPIO_SHIFT;
   1897 		sc->sc_ctrl_ext |=
   1898 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   1899 		    CTRL_EXT_SWDPINS_SHIFT;
   1900 	} else {
   1901 		sc->sc_ctrl_ext |=
   1902 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   1903 		    CTRL_EXT_SWDPIO_SHIFT;
   1904 	}
   1905 #endif
   1906 
   1907 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   1908 #if 0
   1909 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   1910 #endif
   1911 
   1912 	/*
   1913 	 * Set up some register offsets that are different between
   1914 	 * the i82542 and the i82543 and later chips.
   1915 	 */
   1916 	if (sc->sc_type < WM_T_82543) {
   1917 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   1918 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   1919 	} else {
   1920 		sc->sc_rdt_reg = WMREG_RDT;
   1921 		sc->sc_tdt_reg = WMREG_TDT;
   1922 	}
   1923 
   1924 	if (sc->sc_type == WM_T_PCH) {
   1925 		uint16_t val;
   1926 
   1927 		/* Save the NVM K1 bit setting */
   1928 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
   1929 
   1930 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
   1931 			sc->sc_nvm_k1_enabled = 1;
   1932 		else
   1933 			sc->sc_nvm_k1_enabled = 0;
   1934 	}
   1935 
   1936 	/*
   1937 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
   1938 	 * media structures accordingly.
   1939 	 */
   1940 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   1941 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   1942 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   1943 	    || sc->sc_type == WM_T_82573
   1944 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   1945 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   1946 		wm_gmii_mediainit(sc, wmp->wmp_product);
   1947 	} else if (sc->sc_type < WM_T_82543 ||
   1948 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   1949 		if (wmp->wmp_flags & WMP_F_1000T)
   1950 			aprint_error_dev(sc->sc_dev,
   1951 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   1952 		wm_tbi_mediainit(sc);
   1953 	} else {
   1954 		switch (sc->sc_type) {
   1955 		case WM_T_82575:
   1956 		case WM_T_82576:
   1957 		case WM_T_82580:
   1958 		case WM_T_82580ER:
   1959 		case WM_T_I350:
   1960 		case WM_T_I354:
   1961 		case WM_T_I210:
   1962 		case WM_T_I211:
   1963 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   1964 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
   1965 			case CTRL_EXT_LINK_MODE_1000KX:
   1966 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   1967 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   1968 				    reg | CTRL_EXT_I2C_ENA);
   1969 				panic("not supported yet\n");
   1970 				break;
   1971 			case CTRL_EXT_LINK_MODE_SGMII:
   1972 				if (wm_sgmii_uses_mdio(sc)) {
   1973 					aprint_verbose_dev(sc->sc_dev,
   1974 					    "SGMII(MDIO)\n");
   1975 					sc->sc_flags |= WM_F_SGMII;
   1976 					wm_gmii_mediainit(sc,
   1977 					    wmp->wmp_product);
   1978 					break;
   1979 				}
   1980 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   1981 				/*FALLTHROUGH*/
   1982 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   1983 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   1984 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   1985 				    reg | CTRL_EXT_I2C_ENA);
   1986 				panic("not supported yet\n");
   1987 				break;
   1988 			case CTRL_EXT_LINK_MODE_GMII:
   1989 			default:
   1990 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   1991 				    reg & ~CTRL_EXT_I2C_ENA);
   1992 				wm_gmii_mediainit(sc, wmp->wmp_product);
   1993 				break;
   1994 			}
   1995 			break;
   1996 		default:
   1997 			if (wmp->wmp_flags & WMP_F_1000X)
   1998 				aprint_error_dev(sc->sc_dev,
   1999 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2000 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2001 		}
   2002 	}
   2003 
   2004 	ifp = &sc->sc_ethercom.ec_if;
   2005 	xname = device_xname(sc->sc_dev);
   2006 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2007 	ifp->if_softc = sc;
   2008 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2009 	ifp->if_ioctl = wm_ioctl;
   2010 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2011 		ifp->if_start = wm_nq_start;
   2012 	else
   2013 		ifp->if_start = wm_start;
   2014 	ifp->if_watchdog = wm_watchdog;
   2015 	ifp->if_init = wm_init;
   2016 	ifp->if_stop = wm_stop;
   2017 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2018 	IFQ_SET_READY(&ifp->if_snd);
   2019 
   2020 	/* Check for jumbo frame */
   2021 	switch (sc->sc_type) {
   2022 	case WM_T_82573:
   2023 		/* XXX limited to 9234 if ASPM is disabled */
   2024 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
   2025 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
   2026 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2027 		break;
   2028 	case WM_T_82571:
   2029 	case WM_T_82572:
   2030 	case WM_T_82574:
   2031 	case WM_T_82575:
   2032 	case WM_T_82576:
   2033 	case WM_T_82580:
   2034 	case WM_T_82580ER:
   2035 	case WM_T_I350:
   2036 	case WM_T_I354: /* XXXX ok? */
   2037 	case WM_T_I210:
   2038 	case WM_T_I211:
   2039 	case WM_T_80003:
   2040 	case WM_T_ICH9:
   2041 	case WM_T_ICH10:
   2042 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2043 	case WM_T_PCH_LPT:
   2044 		/* XXX limited to 9234 */
   2045 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2046 		break;
   2047 	case WM_T_PCH:
   2048 		/* XXX limited to 4096 */
   2049 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2050 		break;
   2051 	case WM_T_82542_2_0:
   2052 	case WM_T_82542_2_1:
   2053 	case WM_T_82583:
   2054 	case WM_T_ICH8:
   2055 		/* No support for jumbo frame */
   2056 		break;
   2057 	default:
   2058 		/* ETHER_MAX_LEN_JUMBO */
   2059 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2060 		break;
   2061 	}
   2062 
   2063 	/*
   2064 	 * If we're a i82543 or greater, we can support VLANs.
   2065 	 */
   2066 	if (sc->sc_type >= WM_T_82543)
   2067 		sc->sc_ethercom.ec_capabilities |=
   2068 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2069 
   2070 	/*
   2071 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   2072 	 * on i82543 and later.
   2073 	 */
   2074 	if (sc->sc_type >= WM_T_82543) {
   2075 		ifp->if_capabilities |=
   2076 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2077 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2078 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2079 		    IFCAP_CSUM_TCPv6_Tx |
   2080 		    IFCAP_CSUM_UDPv6_Tx;
   2081 	}
   2082 
   2083 	/*
   2084 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2085 	 *
   2086 	 *	82541GI (8086:1076) ... no
   2087 	 *	82572EI (8086:10b9) ... yes
   2088 	 */
   2089 	if (sc->sc_type >= WM_T_82571) {
   2090 		ifp->if_capabilities |=
   2091 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2092 	}
   2093 
   2094 	/*
   2095 	 * If we're a i82544 or greater (except i82547), we can do
   2096 	 * TCP segmentation offload.
   2097 	 */
   2098 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2099 		ifp->if_capabilities |= IFCAP_TSOv4;
   2100 	}
   2101 
   2102 	if (sc->sc_type >= WM_T_82571) {
   2103 		ifp->if_capabilities |= IFCAP_TSOv6;
   2104 	}
   2105 
   2106 #ifdef WM_MPSAFE
   2107 	sc->sc_txrx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2108 #else
   2109 	sc->sc_txrx_lock = NULL;
   2110 #endif
   2111 
   2112 	/*
   2113 	 * Attach the interface.
   2114 	 */
   2115 	if_attach(ifp);
   2116 	ether_ifattach(ifp, enaddr);
   2117 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2118 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
   2119 
   2120 #ifdef WM_EVENT_COUNTERS
   2121 	/* Attach event counters. */
   2122 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2123 	    NULL, xname, "txsstall");
   2124 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2125 	    NULL, xname, "txdstall");
   2126 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2127 	    NULL, xname, "txfifo_stall");
   2128 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2129 	    NULL, xname, "txdw");
   2130 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2131 	    NULL, xname, "txqe");
   2132 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2133 	    NULL, xname, "rxintr");
   2134 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2135 	    NULL, xname, "linkintr");
   2136 
   2137 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2138 	    NULL, xname, "rxipsum");
   2139 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2140 	    NULL, xname, "rxtusum");
   2141 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2142 	    NULL, xname, "txipsum");
   2143 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2144 	    NULL, xname, "txtusum");
   2145 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2146 	    NULL, xname, "txtusum6");
   2147 
   2148 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2149 	    NULL, xname, "txtso");
   2150 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2151 	    NULL, xname, "txtso6");
   2152 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2153 	    NULL, xname, "txtsopain");
   2154 
   2155 	for (i = 0; i < WM_NTXSEGS; i++) {
   2156 		snprintf(wm_txseg_evcnt_names[i],
   2157 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2158 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2159 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2160 	}
   2161 
   2162 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2163 	    NULL, xname, "txdrop");
   2164 
   2165 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2166 	    NULL, xname, "tu");
   2167 
   2168 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2169 	    NULL, xname, "tx_xoff");
   2170 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2171 	    NULL, xname, "tx_xon");
   2172 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2173 	    NULL, xname, "rx_xoff");
   2174 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2175 	    NULL, xname, "rx_xon");
   2176 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2177 	    NULL, xname, "rx_macctl");
   2178 #endif /* WM_EVENT_COUNTERS */
   2179 
   2180 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2181 		pmf_class_network_register(self, ifp);
   2182 	else
   2183 		aprint_error_dev(self, "couldn't establish power handler\n");
   2184 
   2185 	return;
   2186 
   2187 	/*
   2188 	 * Free any resources we've allocated during the failed attach
   2189 	 * attempt.  Do this in reverse order and fall through.
   2190 	 */
   2191  fail_5:
   2192 	for (i = 0; i < WM_NRXDESC; i++) {
   2193 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2194 			bus_dmamap_destroy(sc->sc_dmat,
   2195 			    sc->sc_rxsoft[i].rxs_dmamap);
   2196 	}
   2197  fail_4:
   2198 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2199 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2200 			bus_dmamap_destroy(sc->sc_dmat,
   2201 			    sc->sc_txsoft[i].txs_dmamap);
   2202 	}
   2203 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2204  fail_3:
   2205 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2206  fail_2:
   2207 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2208 	    sc->sc_cd_size);
   2209  fail_1:
   2210 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2211  fail_0:
   2212 	return;
   2213 }
   2214 
/*
 * wm_detach:
 *
 *	Device detach routine.  Undoes the work of wm_attach() in roughly
 *	reverse order: stops the interface, releases firmware/manageability
 *	control, detaches PHYs and media, tears down the network interface,
 *	frees all DMA resources, disestablishes the interrupt, and unmaps
 *	the register windows.  Always returns 0 (success).
 */
static int
wm_detach(device_t self, int flags __unused)
{
	struct wm_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i;
#ifndef WM_MPSAFE
	/* Non-MPSAFE builds rely on spl protection instead of the mutex. */
	int s;

	s = splnet();
#endif
	/* Stop the interface. Callouts are stopped in it. */
	wm_stop(ifp, 1);

#ifndef WM_MPSAFE
	splx(s);
#endif

	/* Undo the pmf_device_register() done at the end of attach. */
	pmf_device_deregister(self);

	/* Tell the firmware about the release */
	WM_LOCK(sc);
	wm_release_manageability(sc);
	wm_release_hw_control(sc);
	WM_UNLOCK(sc);

	/* Detach all PHYs found on the MII bus. */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	/* Detach the interface from the network stack. */
	ether_ifdetach(ifp);
	if_detach(ifp);


	/* Unload RX dmamaps and free mbufs */
	WM_LOCK(sc);
	wm_rxdrain(sc);
	WM_UNLOCK(sc);
	/* Must unlock here */

	/* Free dmamap. It's the same as the end of the wm_attach() function */
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	/*
	 * Release the control-data (descriptor ring) DMA resources in the
	 * reverse order of their allocation: unload, destroy the map,
	 * unmap the KVA, then free the physical segment.
	 */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sc->sc_cd_size);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);

	/* Disestablish the interrupt handler */
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	/* Unmap the registers */
	if (sc->sc_ss) {
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
		sc->sc_ss = 0;
	}

	/* Unmap the I/O space window, if one was mapped. */
	if (sc->sc_ios) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		sc->sc_ios = 0;
	}

	/* NULL in the non-WM_MPSAFE case; mutex_obj_free() needs the guard. */
	if (sc->sc_txrx_lock)
		mutex_obj_free(sc->sc_txrx_lock);

	return 0;
}
   2295 
   2296 /*
   2297  * wm_tx_offload:
   2298  *
   2299  *	Set up TCP/IP checksumming parameters for the
   2300  *	specified packet.
   2301  */
   2302 static int
   2303 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   2304     uint8_t *fieldsp)
   2305 {
   2306 	struct mbuf *m0 = txs->txs_mbuf;
   2307 	struct livengood_tcpip_ctxdesc *t;
   2308 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   2309 	uint32_t ipcse;
   2310 	struct ether_header *eh;
   2311 	int offset, iphl;
   2312 	uint8_t fields;
   2313 
   2314 	/*
   2315 	 * XXX It would be nice if the mbuf pkthdr had offset
   2316 	 * fields for the protocol headers.
   2317 	 */
   2318 
   2319 	eh = mtod(m0, struct ether_header *);
   2320 	switch (htons(eh->ether_type)) {
   2321 	case ETHERTYPE_IP:
   2322 	case ETHERTYPE_IPV6:
   2323 		offset = ETHER_HDR_LEN;
   2324 		break;
   2325 
   2326 	case ETHERTYPE_VLAN:
   2327 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2328 		break;
   2329 
   2330 	default:
   2331 		/*
   2332 		 * Don't support this protocol or encapsulation.
   2333 		 */
   2334 		*fieldsp = 0;
   2335 		*cmdp = 0;
   2336 		return 0;
   2337 	}
   2338 
   2339 	if ((m0->m_pkthdr.csum_flags &
   2340 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   2341 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   2342 	} else {
   2343 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   2344 	}
   2345 	ipcse = offset + iphl - 1;
   2346 
   2347 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   2348 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   2349 	seg = 0;
   2350 	fields = 0;
   2351 
   2352 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   2353 		int hlen = offset + iphl;
   2354 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   2355 
   2356 		if (__predict_false(m0->m_len <
   2357 				    (hlen + sizeof(struct tcphdr)))) {
   2358 			/*
   2359 			 * TCP/IP headers are not in the first mbuf; we need
   2360 			 * to do this the slow and painful way.  Let's just
   2361 			 * hope this doesn't happen very often.
   2362 			 */
   2363 			struct tcphdr th;
   2364 
   2365 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   2366 
   2367 			m_copydata(m0, hlen, sizeof(th), &th);
   2368 			if (v4) {
   2369 				struct ip ip;
   2370 
   2371 				m_copydata(m0, offset, sizeof(ip), &ip);
   2372 				ip.ip_len = 0;
   2373 				m_copyback(m0,
   2374 				    offset + offsetof(struct ip, ip_len),
   2375 				    sizeof(ip.ip_len), &ip.ip_len);
   2376 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   2377 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   2378 			} else {
   2379 				struct ip6_hdr ip6;
   2380 
   2381 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   2382 				ip6.ip6_plen = 0;
   2383 				m_copyback(m0,
   2384 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   2385 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   2386 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   2387 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   2388 			}
   2389 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   2390 			    sizeof(th.th_sum), &th.th_sum);
   2391 
   2392 			hlen += th.th_off << 2;
   2393 		} else {
   2394 			/*
   2395 			 * TCP/IP headers are in the first mbuf; we can do
   2396 			 * this the easy way.
   2397 			 */
   2398 			struct tcphdr *th;
   2399 
   2400 			if (v4) {
   2401 				struct ip *ip =
   2402 				    (void *)(mtod(m0, char *) + offset);
   2403 				th = (void *)(mtod(m0, char *) + hlen);
   2404 
   2405 				ip->ip_len = 0;
   2406 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   2407 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   2408 			} else {
   2409 				struct ip6_hdr *ip6 =
   2410 				    (void *)(mtod(m0, char *) + offset);
   2411 				th = (void *)(mtod(m0, char *) + hlen);
   2412 
   2413 				ip6->ip6_plen = 0;
   2414 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   2415 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   2416 			}
   2417 			hlen += th->th_off << 2;
   2418 		}
   2419 
   2420 		if (v4) {
   2421 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   2422 			cmdlen |= WTX_TCPIP_CMD_IP;
   2423 		} else {
   2424 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   2425 			ipcse = 0;
   2426 		}
   2427 		cmd |= WTX_TCPIP_CMD_TSE;
   2428 		cmdlen |= WTX_TCPIP_CMD_TSE |
   2429 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   2430 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   2431 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   2432 	}
   2433 
   2434 	/*
   2435 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   2436 	 * offload feature, if we load the context descriptor, we
   2437 	 * MUST provide valid values for IPCSS and TUCSS fields.
   2438 	 */
   2439 
   2440 	ipcs = WTX_TCPIP_IPCSS(offset) |
   2441 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   2442 	    WTX_TCPIP_IPCSE(ipcse);
   2443 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   2444 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   2445 		fields |= WTX_IXSM;
   2446 	}
   2447 
   2448 	offset += iphl;
   2449 
   2450 	if (m0->m_pkthdr.csum_flags &
   2451 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   2452 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   2453 		fields |= WTX_TXSM;
   2454 		tucs = WTX_TCPIP_TUCSS(offset) |
   2455 		    WTX_TCPIP_TUCSO(offset +
   2456 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   2457 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   2458 	} else if ((m0->m_pkthdr.csum_flags &
   2459 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   2460 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   2461 		fields |= WTX_TXSM;
   2462 		tucs = WTX_TCPIP_TUCSS(offset) |
   2463 		    WTX_TCPIP_TUCSO(offset +
   2464 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   2465 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   2466 	} else {
   2467 		/* Just initialize it to a valid TCP context. */
   2468 		tucs = WTX_TCPIP_TUCSS(offset) |
   2469 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   2470 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   2471 	}
   2472 
   2473 	/* Fill in the context descriptor. */
   2474 	t = (struct livengood_tcpip_ctxdesc *)
   2475 	    &sc->sc_txdescs[sc->sc_txnext];
   2476 	t->tcpip_ipcs = htole32(ipcs);
   2477 	t->tcpip_tucs = htole32(tucs);
   2478 	t->tcpip_cmdlen = htole32(cmdlen);
   2479 	t->tcpip_seg = htole32(seg);
   2480 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   2481 
   2482 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   2483 	txs->txs_ndesc++;
   2484 
   2485 	*cmdp = cmd;
   2486 	*fieldsp = fields;
   2487 
   2488 	return 0;
   2489 }
   2490 
   2491 static void
   2492 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   2493 {
   2494 	struct mbuf *m;
   2495 	int i;
   2496 
   2497 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   2498 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   2499 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   2500 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   2501 		    m->m_data, m->m_len, m->m_flags);
   2502 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   2503 	    i, i == 1 ? "" : "s");
   2504 }
   2505 
   2506 /*
   2507  * wm_82547_txfifo_stall:
   2508  *
   2509  *	Callout used to wait for the 82547 Tx FIFO to drain,
   2510  *	reset the FIFO pointers, and restart packet transmission.
   2511  */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;
#ifndef WM_MPSAFE
	int s;

	s = splnet();
#endif
	WM_LOCK(sc);

	/* Interface is being torn down; do nothing. */
	if (sc->sc_stopping)
		goto out;

	if (sc->sc_txfifo_stall) {
		/*
		 * Drain test: the descriptor ring is empty (TDT == TDH)
		 * and the chip's internal FIFO head/tail and saved
		 * head/tail pointers all agree.
		 */
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			/* Transmitter must be disabled while the FIFO
			 * pointers are rewritten. */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			/* Reset software FIFO accounting and resume Tx. */
			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start_locked(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

out:
	WM_UNLOCK(sc);
#ifndef WM_MPSAFE
	splx(s);
#endif
}
   2562 
   2563 static void
   2564 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   2565 {
   2566 	uint32_t reg;
   2567 
   2568 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   2569 
   2570 	if (on != 0)
   2571 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   2572 	else
   2573 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   2574 
   2575 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   2576 }
   2577 
   2578 /*
   2579  * wm_82547_txfifo_bugchk:
   2580  *
   2581  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   2582  *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   2584  *
   2585  *	We do this by checking the amount of space before the end
   2586  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   2587  *	the Tx FIFO, wait for all remaining packets to drain, reset
   2588  *	the internal FIFO pointers to the beginning, and restart
   2589  *	transmission on the interface.
   2590  */
   2591 #define	WM_FIFO_HDR		0x10
   2592 #define	WM_82547_PAD_LEN	0x3e0
   2593 static int
   2594 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   2595 {
   2596 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
   2597 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   2598 
   2599 	/* Just return if already stalled. */
   2600 	if (sc->sc_txfifo_stall)
   2601 		return 1;
   2602 
   2603 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   2604 		/* Stall only occurs in half-duplex mode. */
   2605 		goto send_packet;
   2606 	}
   2607 
   2608 	if (len >= WM_82547_PAD_LEN + space) {
   2609 		sc->sc_txfifo_stall = 1;
   2610 		callout_schedule(&sc->sc_txfifo_ch, 1);
   2611 		return 1;
   2612 	}
   2613 
   2614  send_packet:
   2615 	sc->sc_txfifo_head += len;
   2616 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
   2617 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
   2618 
   2619 	return 0;
   2620 }
   2621 
   2622 /*
   2623  * wm_start:		[ifnet interface function]
   2624  *
   2625  *	Start packet transmission on the interface.
   2626  */
   2627 static void
   2628 wm_start(struct ifnet *ifp)
   2629 {
   2630 	struct wm_softc *sc = ifp->if_softc;
   2631 
   2632 	WM_LOCK(sc);
   2633 	if (!sc->sc_stopping)
   2634 		wm_start_locked(ifp);
   2635 	WM_UNLOCK(sc);
   2636 }
   2637 
/*
 * wm_start_locked:
 *
 *	Body of wm_start(): dequeue packets from if_snd and map them
 *	onto the (legacy, non-NEWQUEUE) Tx descriptor ring.  Must be
 *	called with the softc lock held.
 */
static void
wm_start_locked(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	KASSERT(WM_LOCKED(sc));

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		m0 = NULL;

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			/* Try to reclaim completed jobs first. */
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		/* Grab a packet off the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 * (m0 is freed by the cleanup after the loop.)
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 * NOTE(review): the packet is actually dropped by
			 * the post-loop cleanup, not requeued -- verify.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    device_xname(sc->sc_dev)));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			/* wm_tx_offload() consumes a context descriptor
			 * and bumps txs_ndesc on success. */
			if (wm_tx_offload(sc, txs, &cksumcmd,
					  &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode.  Append a
				 * 4-byte sentinel descriptor.
				 */
				if (use_tso &&
				    seg == dmamap->dm_nsegs - 1 &&
				    curlen > 8)
					curlen -= 4;

				wm_set_dma_addr(
				    &sc->sc_txdescs[nexttx].wtx_addr,
				    curaddr);
				sc->sc_txdescs[nexttx].wtx_cmdlen =
				    htole32(cksumcmd | curlen);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
				    0;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
				    cksumfields;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
				lasttx = nexttx;

				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: desc %d: low %#" PRIx64 ", "
				     "len %#04zx\n",
				    device_xname(sc->sc_dev), nexttx,
				    (uint64_t)curaddr, curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		}

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}

	/* A break above may leave a dequeued but uncommitted packet. */
	if (m0 != NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
		m_freem(m0);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
   2947 
   2948 /*
   2949  * wm_nq_tx_offload:
   2950  *
   2951  *	Set up TCP/IP checksumming parameters for the
   2952  *	specified packet, for NEWQUEUE devices
   2953  */
static int
wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
    uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct m_tag *mtag;
	uint32_t vl_len, mssidx, cmdc;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */
	*cmdlenp = 0;
	*fieldsp = 0;

	/* Determine where the IP header starts from the Ethernet type.
	 * (htons on the wire-order field acts as ntohs here.) */
	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*do_csum = false;
		return 0;
	}
	*do_csum = true;
	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;

	/* MAC header length goes into the context VL/LEN word. */
	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);

	/* IP header length, taken from the csum_data hint. */
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);

	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
		     << NQTXC_VLLEN_VLAN_SHIFT);
		*cmdlenp |= NQTX_CMD_VLE;
	}

	mssidx = 0;

	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		/* TSO: zero the IP length field and seed the TCP pseudo-
		 * header checksum, as the hardware expects. */
		int hlen = offset + iphl;
		int tcp_hlen;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
				    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			/* Write the seeded checksum back into the chain. */
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			tcp_hlen = th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			tcp_hlen = th->th_off << 2;
		}
		hlen += tcp_hlen;
		*cmdlenp |= NQTX_CMD_TSE;

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			*fieldsp |= NQTXD_FIELDS_TUXSM;
		}
		/* PAYLEN excludes the MAC/IP/TCP headers for TSO. */
		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
	} else {
		/* Non-TSO: PAYLEN is the whole packet. */
		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
	}

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		*fieldsp |= NQTXD_FIELDS_IXSM;
		cmdc |= NQTXC_CMD_IP4;
	}

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
			cmdc |= NQTXC_CMD_TCP;
		} else {
			cmdc |= NQTXC_CMD_UDP;
		}
		cmdc |= NQTXC_CMD_IP4;
		*fieldsp |= NQTXD_FIELDS_TUXSM;
	}
	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
			cmdc |= NQTXC_CMD_TCP;
		} else {
			cmdc |= NQTXC_CMD_UDP;
		}
		cmdc |= NQTXC_CMD_IP6;
		*fieldsp |= NQTXD_FIELDS_TUXSM;
	}

	/* Fill in the context descriptor. */
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
	    htole32(vl_len);
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
	    htole32(cmdc);
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
	    htole32(mssidx);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
	    sc->sc_txnext, 0, vl_len));
	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
	/* The context descriptor consumed one ring slot. */
	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;
	return 0;
}
   3147 
   3148 /*
   3149  * wm_nq_start:		[ifnet interface function]
   3150  *
   3151  *	Start packet transmission on the interface for NEWQUEUE devices
   3152  */
   3153 static void
   3154 wm_nq_start(struct ifnet *ifp)
   3155 {
   3156 	struct wm_softc *sc = ifp->if_softc;
   3157 
   3158 	WM_LOCK(sc);
   3159 	if (!sc->sc_stopping)
   3160 		wm_nq_start_locked(ifp);
   3161 	WM_UNLOCK(sc);
   3162 }
   3163 
   3164 static void
   3165 wm_nq_start_locked(struct ifnet *ifp)
   3166 {
   3167 	struct wm_softc *sc = ifp->if_softc;
   3168 	struct mbuf *m0;
   3169 	struct m_tag *mtag;
   3170 	struct wm_txsoft *txs;
   3171 	bus_dmamap_t dmamap;
   3172 	int error, nexttx, lasttx = -1, seg, segs_needed;
   3173 	bool do_csum, sent;
   3174 
   3175 	KASSERT(WM_LOCKED(sc));
   3176 
   3177 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   3178 		return;
   3179 
   3180 	sent = false;
   3181 
   3182 	/*
   3183 	 * Loop through the send queue, setting up transmit descriptors
   3184 	 * until we drain the queue, or use up all available transmit
   3185 	 * descriptors.
   3186 	 */
   3187 	for (;;) {
   3188 		m0 = NULL;
   3189 
   3190 		/* Get a work queue entry. */
   3191 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   3192 			wm_txintr(sc);
   3193 			if (sc->sc_txsfree == 0) {
   3194 				DPRINTF(WM_DEBUG_TX,
   3195 				    ("%s: TX: no free job descriptors\n",
   3196 					device_xname(sc->sc_dev)));
   3197 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   3198 				break;
   3199 			}
   3200 		}
   3201 
   3202 		/* Grab a packet off the queue. */
   3203 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   3204 		if (m0 == NULL)
   3205 			break;
   3206 
   3207 		DPRINTF(WM_DEBUG_TX,
   3208 		    ("%s: TX: have packet to transmit: %p\n",
   3209 		    device_xname(sc->sc_dev), m0));
   3210 
   3211 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   3212 		dmamap = txs->txs_dmamap;
   3213 
   3214 		/*
   3215 		 * Load the DMA map.  If this fails, the packet either
   3216 		 * didn't fit in the allotted number of segments, or we
   3217 		 * were short on resources.  For the too-many-segments
   3218 		 * case, we simply report an error and drop the packet,
   3219 		 * since we can't sanely copy a jumbo packet to a single
   3220 		 * buffer.
   3221 		 */
   3222 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   3223 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   3224 		if (error) {
   3225 			if (error == EFBIG) {
   3226 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   3227 				log(LOG_ERR, "%s: Tx packet consumes too many "
   3228 				    "DMA segments, dropping...\n",
   3229 				    device_xname(sc->sc_dev));
   3230 				wm_dump_mbuf_chain(sc, m0);
   3231 				m_freem(m0);
   3232 				continue;
   3233 			}
   3234 			/*
   3235 			 * Short on resources, just stop for now.
   3236 			 */
   3237 			DPRINTF(WM_DEBUG_TX,
   3238 			    ("%s: TX: dmamap load failed: %d\n",
   3239 			    device_xname(sc->sc_dev), error));
   3240 			break;
   3241 		}
   3242 
   3243 		segs_needed = dmamap->dm_nsegs;
   3244 
   3245 		/*
   3246 		 * Ensure we have enough descriptors free to describe
   3247 		 * the packet.  Note, we always reserve one descriptor
   3248 		 * at the end of the ring due to the semantics of the
   3249 		 * TDT register, plus one more in the event we need
   3250 		 * to load offload context.
   3251 		 */
   3252 		if (segs_needed > sc->sc_txfree - 2) {
   3253 			/*
   3254 			 * Not enough free descriptors to transmit this
   3255 			 * packet.  We haven't committed anything yet,
   3256 			 * so just unload the DMA map, put the packet
   3257 			 * pack on the queue, and punt.  Notify the upper
   3258 			 * layer that there are no more slots left.
   3259 			 */
   3260 			DPRINTF(WM_DEBUG_TX,
   3261 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   3262 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   3263 			    segs_needed, sc->sc_txfree - 1));
   3264 			ifp->if_flags |= IFF_OACTIVE;
   3265 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   3266 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   3267 			break;
   3268 		}
   3269 
   3270 		/*
   3271 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
   3272 		 */
   3273 
   3274 		DPRINTF(WM_DEBUG_TX,
   3275 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   3276 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   3277 
   3278 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   3279 
   3280 		/*
   3281 		 * Store a pointer to the packet so that we can free it
   3282 		 * later.
   3283 		 *
   3284 		 * Initially, we consider the number of descriptors the
   3285 		 * packet uses the number of DMA segments.  This may be
   3286 		 * incremented by 1 if we do checksum offload (a descriptor
   3287 		 * is used to set the checksum context).
   3288 		 */
   3289 		txs->txs_mbuf = m0;
   3290 		txs->txs_firstdesc = sc->sc_txnext;
   3291 		txs->txs_ndesc = segs_needed;
   3292 
   3293 		/* Set up offload parameters for this packet. */
   3294 		uint32_t cmdlen, fields, dcmdlen;
   3295 		if (m0->m_pkthdr.csum_flags &
   3296 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   3297 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   3298 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   3299 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   3300 			    &do_csum) != 0) {
   3301 				/* Error message already displayed. */
   3302 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   3303 				continue;
   3304 			}
   3305 		} else {
   3306 			do_csum = false;
   3307 			cmdlen = 0;
   3308 			fields = 0;
   3309 		}
   3310 
   3311 		/* Sync the DMA map. */
   3312 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   3313 		    BUS_DMASYNC_PREWRITE);
   3314 
   3315 		/*
   3316 		 * Initialize the first transmit descriptor.
   3317 		 */
   3318 		nexttx = sc->sc_txnext;
   3319 		if (!do_csum) {
   3320 			/* setup a legacy descriptor */
   3321 			wm_set_dma_addr(
   3322 			    &sc->sc_txdescs[nexttx].wtx_addr,
   3323 			    dmamap->dm_segs[0].ds_addr);
   3324 			sc->sc_txdescs[nexttx].wtx_cmdlen =
   3325 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   3326 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
   3327 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
   3328 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   3329 			    NULL) {
   3330 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
   3331 				    htole32(WTX_CMD_VLE);
   3332 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
   3333 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   3334 			} else {
   3335 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   3336 			}
   3337 			dcmdlen = 0;
   3338 		} else {
   3339 			/* setup an advanced data descriptor */
   3340 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   3341 			    htole64(dmamap->dm_segs[0].ds_addr);
   3342 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   3343 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
   3344 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
   3345 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
   3346 			    htole32(fields);
   3347 			DPRINTF(WM_DEBUG_TX,
   3348 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   3349 			    device_xname(sc->sc_dev), nexttx,
   3350 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   3351 			DPRINTF(WM_DEBUG_TX,
   3352 			    ("\t 0x%08x%08x\n", fields,
   3353 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   3354 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   3355 		}
   3356 
   3357 		lasttx = nexttx;
   3358 		nexttx = WM_NEXTTX(sc, nexttx);
   3359 		/*
   3360 		 * fill in the next descriptors. legacy or adcanced format
   3361 		 * is the same here
   3362 		 */
   3363 		for (seg = 1; seg < dmamap->dm_nsegs;
   3364 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
   3365 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   3366 			    htole64(dmamap->dm_segs[seg].ds_addr);
   3367 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
   3368 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   3369 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   3370 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
   3371 			lasttx = nexttx;
   3372 
   3373 			DPRINTF(WM_DEBUG_TX,
   3374 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   3375 			     "len %#04zx\n",
   3376 			    device_xname(sc->sc_dev), nexttx,
   3377 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   3378 			    dmamap->dm_segs[seg].ds_len));
   3379 		}
   3380 
   3381 		KASSERT(lasttx != -1);
   3382 
   3383 		/*
   3384 		 * Set up the command byte on the last descriptor of
   3385 		 * the packet.  If we're in the interrupt delay window,
   3386 		 * delay the interrupt.
   3387 		 */
   3388 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   3389 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   3390 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   3391 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   3392 
   3393 		txs->txs_lastdesc = lasttx;
   3394 
   3395 		DPRINTF(WM_DEBUG_TX,
   3396 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   3397 		    device_xname(sc->sc_dev),
   3398 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   3399 
   3400 		/* Sync the descriptors we're using. */
   3401 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   3402 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   3403 
   3404 		/* Give the packet to the chip. */
   3405 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   3406 		sent = true;
   3407 
   3408 		DPRINTF(WM_DEBUG_TX,
   3409 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   3410 
   3411 		DPRINTF(WM_DEBUG_TX,
   3412 		    ("%s: TX: finished transmitting packet, job %d\n",
   3413 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   3414 
   3415 		/* Advance the tx pointer. */
   3416 		sc->sc_txfree -= txs->txs_ndesc;
   3417 		sc->sc_txnext = nexttx;
   3418 
   3419 		sc->sc_txsfree--;
   3420 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   3421 
   3422 		/* Pass the packet to any BPF listeners. */
   3423 		bpf_mtap(ifp, m0);
   3424 	}
   3425 
   3426 	if (m0 != NULL) {
   3427 		ifp->if_flags |= IFF_OACTIVE;
   3428 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   3429 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   3430 		m_freem(m0);
   3431 	}
   3432 
   3433 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   3434 		/* No more slots; notify upper layer. */
   3435 		ifp->if_flags |= IFF_OACTIVE;
   3436 	}
   3437 
   3438 	if (sent) {
   3439 		/* Set a watchdog timer in case the chip flakes out. */
   3440 		ifp->if_timer = 5;
   3441 	}
   3442 }
   3443 
   3444 /*
   3445  * wm_watchdog:		[ifnet interface function]
   3446  *
   3447  *	Watchdog timer handler.
   3448  */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	WM_LOCK(sc);
	wm_txintr(sc);
	WM_UNLOCK(sc);

	/*
	 * If the sweep freed everything, the timeout was just a delayed
	 * completion; only complain when descriptors are still outstanding.
	 */
	if (sc->sc_txfree != WM_NTXDESC(sc)) {
#ifdef WM_DEBUG
		int i, j;
		struct wm_txsoft *txs;
#endif
		log(LOG_ERR,
		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;
#ifdef WM_DEBUG
		/*
		 * Dump every in-flight Tx job (txsdirty..txsnext) and each
		 * descriptor it owns, firstdesc through lastdesc inclusive.
		 */
		for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
		    i = WM_NEXTTXS(sc, i)) {
		    txs = &sc->sc_txsoft[i];
		    printf("txs %d tx %d -> %d\n",
			i, txs->txs_firstdesc, txs->txs_lastdesc);
		    for (j = txs->txs_firstdesc; ;
			j = WM_NEXTTX(sc, j)) {
			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
			printf("\t %#08x%08x\n",
			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
			if (j == txs->txs_lastdesc)
				break;
			}
		}
#endif
		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	ifp->if_start(ifp);
}
   3497 
   3498 static int
   3499 wm_ifflags_cb(struct ethercom *ec)
   3500 {
   3501 	struct ifnet *ifp = &ec->ec_if;
   3502 	struct wm_softc *sc = ifp->if_softc;
   3503 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3504 	int rc = 0;
   3505 
   3506 	WM_LOCK(sc);
   3507 
   3508 	if (change != 0)
   3509 		sc->sc_if_flags = ifp->if_flags;
   3510 
   3511 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   3512 		rc = ENETRESET;
   3513 		goto out;
   3514 	}
   3515 
   3516 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3517 		wm_set_filter(sc);
   3518 
   3519 	wm_set_vlan(sc);
   3520 
   3521 out:
   3522 	WM_UNLOCK(sc);
   3523 
   3524 	return rc;
   3525 }
   3526 
   3527 /*
   3528  * wm_ioctl:		[ifnet interface function]
   3529  *
   3530  *	Handle control requests from the operator.
   3531  */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct sockaddr_dl *sdl;
	int s, error;

	/* Non-MPSAFE configuration serializes with splnet() instead. */
#ifndef WM_MPSAFE
	s = splnet();
#endif
	WM_LOCK(sc);

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCINITIFADDR:
		/*
		 * Changing the link-layer address: copy it into if_dl and
		 * reprogram the receive filter with the new station address.
		 */
		if (ifa->ifa_addr->sa_family == AF_LINK) {
			sdl = satosdl(ifp->if_dl->ifa_addr);
			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
			/* unicast address is first multicast entry */
			wm_set_filter(sc);
			error = 0;
			break;
		}
		/*FALLTHROUGH*/
	default:
		WM_UNLOCK(sc);
#ifdef WM_MPSAFE
		s = splnet();
#endif
		/* It may call wm_start, so unlock here */
		error = ether_ioctl(ifp, cmd, data);
#ifdef WM_MPSAFE
		splx(s);
#endif
		WM_LOCK(sc);

		if (error != ENETRESET)
			break;

		/* ENETRESET means "apply the change ourselves"; clear it. */
		error = 0;

		if (cmd == SIOCSIFCAP) {
			/* if_init may sleep/acquire the lock; drop it first. */
			WM_UNLOCK(sc);
			error = (*ifp->if_init)(ifp);
			WM_LOCK(sc);
		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
		}
		break;
	}

	WM_UNLOCK(sc);

	/* Try to get more packets going. */
	ifp->if_start(ifp);

#ifndef WM_MPSAFE
	splx(s);
#endif
	return error;
}
   3617 
   3618 /*
   3619  * wm_intr:
   3620  *
   3621  *	Interrupt service routine.
   3622  */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	/* Loop until the cause register shows nothing we care about. */
	while (1 /* CONSTCOND */) {
		/* NOTE(review): ICR read presumably clears the asserted
		 * causes (read-to-clear), as is usual for this family --
		 * the loop structure depends on it. */
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
		/* Feed interrupt timing into the entropy pool. */
		rnd_add_uint32(&sc->rnd_source, icr);

		WM_LOCK(sc);

		if (sc->sc_stopping) {
			WM_UNLOCK(sc);
			break;
		}

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Rx and Tx are always swept, regardless of cause bits. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		/* Link status change / rx sequence error / rx config. */
		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		WM_UNLOCK(sc);

		if (icr & ICR_RXO) {
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		ifp->if_start(ifp);
	}

	return handled;
}
   3689 
   3690 /*
   3691  * wm_txintr:
   3692  *
   3693  *	Helper; handle transmit interrupts.
   3694  */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	if (sc->sc_stopping)
		return;

	/* We're about to free descriptors; allow transmission again. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		/* Pull the job's descriptors out of the chip's view. */
		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			/* Not done yet; hand the descriptor back and stop. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		/* Excessive (EC) or late (LC) collisions count as errors. */
		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    device_xname(sc->sc_dev));
			else if (status & WTX_ST_EC) {
				/* Excessive collision abort implies 16 tries. */
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    device_xname(sc->sc_dev));
			}
		} else
			ifp->if_opackets++;

		/* Reclaim descriptors and release the packet's DMA map. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
		ifp->if_timer = 0;
}
   3779 
   3780 /*
   3781  * wm_rxintr:
   3782  *
   3783  *	Helper; handle receive interrupts.
   3784  */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;
	uint16_t vlantag;

	/* Walk the ring from the last processed descriptor. */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);
		vlantag = sc->sc_rxdescs[i].wrx_special;

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/*
		 * In discard mode (a prior buffer in this packet was lost)
		 * just recycle descriptors until end-of-packet.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero. Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Account this fragment toward the packet's total length. */
		m->m_len = len;
		sc->sc_rxlen += len;
		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			WM_RXCHAIN_LINK(sc, m);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except I350 and I21[01]
		 * (not all chips can be configured to strip it),
		 * so we need to trim it.
		 * May need to adjust length of previous mbuf in the
		 * chain if the current mbuf is too short.
		 * For an errata, the RCTL_SECRC bit in RCTL register
		 * is always set in I350, so we don't trim it.
		 */
		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
		    && (sc->sc_type != WM_T_I210)
		    && (sc->sc_type != WM_T_I211)) {
			if (m->m_len < ETHER_CRC_LEN) {
				/* CRC straddles the mbuf boundary. */
				sc->sc_rxtail->m_len
				    -= (ETHER_CRC_LEN - m->m_len);
				m->m_len = 0;
			} else
				m->m_len -= ETHER_CRC_LEN;
			len = sc->sc_rxlen - ETHER_CRC_LEN;
		} else
			len = sc->sc_rxlen;

		WM_RXCHAIN_LINK(sc, m);

		/* Terminate the chain and detach it from the softc. */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		/* XXXX should check for i350 and i354 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(vlantag),
			    continue);
		}

		/*
		 * Set up checksum info for this packet.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

		/* Drop the lock across the stack/BPF calls; they may sleep
		 * or re-enter the driver. */
		WM_UNLOCK(sc);

		/* Pass this up to any BPF listeners. */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);

		WM_LOCK(sc);

		if (sc->sc_stopping)
			break;
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
   3999 
   4000 /*
   4001  * wm_linkintr_gmii:
   4002  *
   4003  *	Helper; handle link interrupts for GMII.
   4004  */
static void
wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
{

	KASSERT(WM_LOCKED(sc));

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	if (icr & ICR_LSC) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: LSC -> mii_pollstat\n",
			device_xname(sc->sc_dev)));
		mii_pollstat(&sc->sc_mii);
		if (sc->sc_type == WM_T_82543) {
			int miistatus, active;

			/*
			 * With 82543, we need to force speed and
			 * duplex on the MAC equal to what the PHY
			 * speed and duplex configuration is.
			 */
			miistatus = sc->sc_mii.mii_media_status;

			if (miistatus & IFM_ACTIVE) {
				active = sc->sc_mii.mii_media_active;
				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
				switch (IFM_SUBTYPE(active)) {
				case IFM_10_T:
					sc->sc_ctrl |= CTRL_SPEED_10;
					break;
				case IFM_100_TX:
					sc->sc_ctrl |= CTRL_SPEED_100;
					break;
				case IFM_1000_T:
					sc->sc_ctrl |= CTRL_SPEED_1000;
					break;
				default:
					/*
					 * fiber?
					 * Should not enter here.
					 */
					printf("unknown media (%x)\n",
					    active);
					break;
				}
				if (active & IFM_FDX)
					sc->sc_ctrl |= CTRL_FD;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			}
		} else if ((sc->sc_type == WM_T_ICH8)
		    && (sc->sc_phytype == WMPHY_IGP_3)) {
			/* ICH8 + IGP3 PHY needs the Kumeran lock-loss fix. */
			wm_kmrn_lock_loss_workaround_ich8lan(sc);
		} else if (sc->sc_type == WM_T_PCH) {
			wm_k1_gig_workaround_hv(sc,
			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
		}

		/* 82578 PHY link-stall workaround at gigabit speed. */
		if ((sc->sc_phytype == WMPHY_82578)
		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
			== IFM_1000_T)) {

			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
				delay(200*1000); /* XXX too big */

				/* Link stall fix for link up */
				wm_gmii_hv_writereg(sc->sc_dev, 1,
				    HV_MUX_DATA_CTRL,
				    HV_MUX_DATA_CTRL_GEN_TO_MAC
				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
				wm_gmii_hv_writereg(sc->sc_dev, 1,
				    HV_MUX_DATA_CTRL,
				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
			}
		}
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK Receive sequence error\n",
			device_xname(sc->sc_dev)));
	}
}
   4086 
   4087 /*
   4088  * wm_linkintr_tbi:
   4089  *
   4090  *	Helper; handle link interrupts for TBI mode.
   4091  */
static void
wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */

			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			/* Collision distance depends on the duplex mode. */
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 chips have FCRTL at the old offset. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXCFG) {
		/* Receiving /C/ ordered sets: autonegotiation in progress. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_nrxcfg++;
		wm_check_for_link(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
}
   4144 
   4145 /*
   4146  * wm_linkintr:
   4147  *
   4148  *	Helper; handle link interrupts.
   4149  */
   4150 static void
   4151 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   4152 {
   4153 
   4154 	if (sc->sc_flags & WM_F_HAS_MII)
   4155 		wm_linkintr_gmii(sc, icr);
   4156 	else
   4157 		wm_linkintr_tbi(sc, icr);
   4158 }
   4159 
   4160 /*
   4161  * wm_tick:
   4162  *
   4163  *	One second timer, used to check link status, sweep up
   4164  *	completed transmit jobs, etc.
   4165  */
static void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#ifndef WM_MPSAFE
	int s;

	s = splnet();
#endif

	WM_LOCK(sc);

	if (sc->sc_stopping)
		goto out;

	/* Flow-control statistics exist only on 82542-2.1 and newer. */
	if (sc->sc_type >= WM_T_82542_2_1) {
		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
	}

	/* Fold the hardware statistics registers into the ifnet counters.
	 * NOTE(review): these reads are accumulated, not stored, which
	 * presumes the registers clear on read -- confirm against the
	 * 8254x manual. */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += 0ULL + /* ensure quad_t */
	    + CSR_READ(sc, WMREG_CRCERRS)
	    + CSR_READ(sc, WMREG_ALGNERRC)
	    + CSR_READ(sc, WMREG_SYMERRC)
	    + CSR_READ(sc, WMREG_RXERRC)
	    + CSR_READ(sc, WMREG_SEC)
	    + CSR_READ(sc, WMREG_CEXTERR)
	    + CSR_READ(sc, WMREG_RLEC);
	/* Missed packets + no-buffer counts appear as input-queue drops. */
	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

out:
	WM_UNLOCK(sc);
#ifndef WM_MPSAFE
	splx(s);
#endif

	/* NOTE(review): sc_stopping is re-read here without the lock;
	 * presumably the callout is also halted/drained in the stop path. */
	if (!sc->sc_stopping)
		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}
   4215 
   4216 /*
   4217  * wm_reset:
   4218  *
   4219  *	Reset the i82542 chip.
   4220  */
   4221 static void
   4222 wm_reset(struct wm_softc *sc)
   4223 {
   4224 	int phy_reset = 0;
   4225 	int error = 0;
   4226 	uint32_t reg, mask;
   4227 
   4228 	/*
   4229 	 * Allocate on-chip memory according to the MTU size.
   4230 	 * The Packet Buffer Allocation register must be written
   4231 	 * before the chip is reset.
   4232 	 */
   4233 	switch (sc->sc_type) {
   4234 	case WM_T_82547:
   4235 	case WM_T_82547_2:
   4236 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4237 		    PBA_22K : PBA_30K;
   4238 		sc->sc_txfifo_head = 0;
   4239 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4240 		sc->sc_txfifo_size =
   4241 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4242 		sc->sc_txfifo_stall = 0;
   4243 		break;
   4244 	case WM_T_82571:
   4245 	case WM_T_82572:
   4246 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4247 	case WM_T_I350:
   4248 	case WM_T_I354:
   4249 	case WM_T_80003:
   4250 		sc->sc_pba = PBA_32K;
   4251 		break;
   4252 	case WM_T_82580:
   4253 	case WM_T_82580ER:
   4254 		sc->sc_pba = PBA_35K;
   4255 		break;
   4256 	case WM_T_I210:
   4257 	case WM_T_I211:
   4258 		sc->sc_pba = PBA_34K;
   4259 		break;
   4260 	case WM_T_82576:
   4261 		sc->sc_pba = PBA_64K;
   4262 		break;
   4263 	case WM_T_82573:
   4264 		sc->sc_pba = PBA_12K;
   4265 		break;
   4266 	case WM_T_82574:
   4267 	case WM_T_82583:
   4268 		sc->sc_pba = PBA_20K;
   4269 		break;
   4270 	case WM_T_ICH8:
   4271 		sc->sc_pba = PBA_8K;
   4272 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4273 		break;
   4274 	case WM_T_ICH9:
   4275 	case WM_T_ICH10:
   4276 		sc->sc_pba = PBA_10K;
   4277 		break;
   4278 	case WM_T_PCH:
   4279 	case WM_T_PCH2:
   4280 	case WM_T_PCH_LPT:
   4281 		sc->sc_pba = PBA_26K;
   4282 		break;
   4283 	default:
   4284 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4285 		    PBA_40K : PBA_48K;
   4286 		break;
   4287 	}
   4288 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4289 
   4290 	/* Prevent the PCI-E bus from sticking */
   4291 	if (sc->sc_flags & WM_F_PCIE) {
   4292 		int timeout = 800;
   4293 
   4294 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4295 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4296 
   4297 		while (timeout--) {
   4298 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4299 			    == 0)
   4300 				break;
   4301 			delay(100);
   4302 		}
   4303 	}
   4304 
   4305 	/* Set the completion timeout for interface */
   4306 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4307 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4308 		wm_set_pcie_completion_timeout(sc);
   4309 
   4310 	/* Clear interrupt */
   4311 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4312 
   4313 	/* Stop the transmit and receive processes. */
   4314 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4315 	sc->sc_rctl &= ~RCTL_EN;
   4316 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4317 	CSR_WRITE_FLUSH(sc);
   4318 
   4319 	/* XXX set_tbi_sbp_82543() */
   4320 
   4321 	delay(10*1000);
   4322 
   4323 	/* Must acquire the MDIO ownership before MAC reset */
   4324 	switch (sc->sc_type) {
   4325 	case WM_T_82573:
   4326 	case WM_T_82574:
   4327 	case WM_T_82583:
   4328 		error = wm_get_hw_semaphore_82573(sc);
   4329 		break;
   4330 	default:
   4331 		break;
   4332 	}
   4333 
   4334 	/*
   4335 	 * 82541 Errata 29? & 82547 Errata 28?
   4336 	 * See also the description about PHY_RST bit in CTRL register
   4337 	 * in 8254x_GBe_SDM.pdf.
   4338 	 */
   4339 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4340 		CSR_WRITE(sc, WMREG_CTRL,
   4341 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4342 		CSR_WRITE_FLUSH(sc);
   4343 		delay(5000);
   4344 	}
   4345 
   4346 	switch (sc->sc_type) {
   4347 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4348 	case WM_T_82541:
   4349 	case WM_T_82541_2:
   4350 	case WM_T_82547:
   4351 	case WM_T_82547_2:
   4352 		/*
   4353 		 * On some chipsets, a reset through a memory-mapped write
   4354 		 * cycle can cause the chip to reset before completing the
   4355 		 * write cycle.  This causes major headache that can be
   4356 		 * avoided by issuing the reset via indirect register writes
   4357 		 * through I/O space.
   4358 		 *
   4359 		 * So, if we successfully mapped the I/O BAR at attach time,
   4360 		 * use that.  Otherwise, try our luck with a memory-mapped
   4361 		 * reset.
   4362 		 */
   4363 		if (sc->sc_flags & WM_F_IOH_VALID)
   4364 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4365 		else
   4366 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4367 		break;
   4368 	case WM_T_82545_3:
   4369 	case WM_T_82546_3:
   4370 		/* Use the shadow control register on these chips. */
   4371 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4372 		break;
   4373 	case WM_T_80003:
   4374 		mask = swfwphysem[sc->sc_funcid];
   4375 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4376 		wm_get_swfw_semaphore(sc, mask);
   4377 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4378 		wm_put_swfw_semaphore(sc, mask);
   4379 		break;
   4380 	case WM_T_ICH8:
   4381 	case WM_T_ICH9:
   4382 	case WM_T_ICH10:
   4383 	case WM_T_PCH:
   4384 	case WM_T_PCH2:
   4385 	case WM_T_PCH_LPT:
   4386 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4387 		if (wm_check_reset_block(sc) == 0) {
   4388 			/*
   4389 			 * Gate automatic PHY configuration by hardware on
   4390 			 * non-managed 82579
   4391 			 */
   4392 			if ((sc->sc_type == WM_T_PCH2)
   4393 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4394 				!= 0))
   4395 				wm_gate_hw_phy_config_ich8lan(sc, 1);
   4396 
   4397 
   4398 			reg |= CTRL_PHY_RESET;
   4399 			phy_reset = 1;
   4400 		}
   4401 		wm_get_swfwhw_semaphore(sc);
   4402 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4403 		/* Don't insert a completion barrier when reset */
   4404 		delay(20*1000);
   4405 		wm_put_swfwhw_semaphore(sc);
   4406 		break;
   4407 	case WM_T_82542_2_0:
   4408 	case WM_T_82542_2_1:
   4409 	case WM_T_82543:
   4410 	case WM_T_82540:
   4411 	case WM_T_82545:
   4412 	case WM_T_82546:
   4413 	case WM_T_82571:
   4414 	case WM_T_82572:
   4415 	case WM_T_82573:
   4416 	case WM_T_82574:
   4417 	case WM_T_82575:
   4418 	case WM_T_82576:
   4419 	case WM_T_82580:
   4420 	case WM_T_82580ER:
   4421 	case WM_T_82583:
   4422 	case WM_T_I350:
   4423 	case WM_T_I354:
   4424 	case WM_T_I210:
   4425 	case WM_T_I211:
   4426 	default:
   4427 		/* Everything else can safely use the documented method. */
   4428 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4429 		break;
   4430 	}
   4431 
   4432 	/* Must release the MDIO ownership after MAC reset */
   4433 	switch (sc->sc_type) {
   4434 	case WM_T_82573:
   4435 	case WM_T_82574:
   4436 	case WM_T_82583:
   4437 		if (error == 0)
   4438 			wm_put_hw_semaphore_82573(sc);
   4439 		break;
   4440 	default:
   4441 		break;
   4442 	}
   4443 
   4444 	if (phy_reset != 0)
   4445 		wm_get_cfg_done(sc);
   4446 
   4447 	/* reload EEPROM */
   4448 	switch (sc->sc_type) {
   4449 	case WM_T_82542_2_0:
   4450 	case WM_T_82542_2_1:
   4451 	case WM_T_82543:
   4452 	case WM_T_82544:
   4453 		delay(10);
   4454 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4455 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4456 		CSR_WRITE_FLUSH(sc);
   4457 		delay(2000);
   4458 		break;
   4459 	case WM_T_82540:
   4460 	case WM_T_82545:
   4461 	case WM_T_82545_3:
   4462 	case WM_T_82546:
   4463 	case WM_T_82546_3:
   4464 		delay(5*1000);
   4465 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4466 		break;
   4467 	case WM_T_82541:
   4468 	case WM_T_82541_2:
   4469 	case WM_T_82547:
   4470 	case WM_T_82547_2:
   4471 		delay(20000);
   4472 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4473 		break;
   4474 	case WM_T_82571:
   4475 	case WM_T_82572:
   4476 	case WM_T_82573:
   4477 	case WM_T_82574:
   4478 	case WM_T_82583:
   4479 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4480 			delay(10);
   4481 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4482 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4483 			CSR_WRITE_FLUSH(sc);
   4484 		}
   4485 		/* check EECD_EE_AUTORD */
   4486 		wm_get_auto_rd_done(sc);
   4487 		/*
   4488 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4489 		 * is set.
   4490 		 */
   4491 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4492 		    || (sc->sc_type == WM_T_82583))
   4493 			delay(25*1000);
   4494 		break;
   4495 	case WM_T_82575:
   4496 	case WM_T_82576:
   4497 	case WM_T_82580:
   4498 	case WM_T_82580ER:
   4499 	case WM_T_I350:
   4500 	case WM_T_I354:
   4501 	case WM_T_I210:
   4502 	case WM_T_I211:
   4503 	case WM_T_80003:
   4504 		/* check EECD_EE_AUTORD */
   4505 		wm_get_auto_rd_done(sc);
   4506 		break;
   4507 	case WM_T_ICH8:
   4508 	case WM_T_ICH9:
   4509 	case WM_T_ICH10:
   4510 	case WM_T_PCH:
   4511 	case WM_T_PCH2:
   4512 	case WM_T_PCH_LPT:
   4513 		break;
   4514 	default:
   4515 		panic("%s: unknown type\n", __func__);
   4516 	}
   4517 
   4518 	/* Check whether EEPROM is present or not */
   4519 	switch (sc->sc_type) {
   4520 	case WM_T_82575:
   4521 	case WM_T_82576:
   4522 #if 0 /* XXX */
   4523 	case WM_T_82580:
   4524 	case WM_T_82580ER:
   4525 #endif
   4526 	case WM_T_I350:
   4527 	case WM_T_I354:
   4528 	case WM_T_ICH8:
   4529 	case WM_T_ICH9:
   4530 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4531 			/* Not found */
   4532 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4533 			if ((sc->sc_type == WM_T_82575)
   4534 			    || (sc->sc_type == WM_T_82576)
   4535 			    || (sc->sc_type == WM_T_82580)
   4536 			    || (sc->sc_type == WM_T_82580ER)
   4537 			    || (sc->sc_type == WM_T_I350)
   4538 			    || (sc->sc_type == WM_T_I354))
   4539 				wm_reset_init_script_82575(sc);
   4540 		}
   4541 		break;
   4542 	default:
   4543 		break;
   4544 	}
   4545 
   4546 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   4547 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4548 		/* clear global device reset status bit */
   4549 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4550 	}
   4551 
   4552 	/* Clear any pending interrupt events. */
   4553 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4554 	reg = CSR_READ(sc, WMREG_ICR);
   4555 
   4556 	/* reload sc_ctrl */
   4557 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4558 
   4559 	if (sc->sc_type == WM_T_I350)
   4560 		wm_set_eee_i350(sc);
   4561 
   4562 	/* dummy read from WUC */
   4563 	if (sc->sc_type == WM_T_PCH)
   4564 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4565 	/*
   4566 	 * For PCH, this write will make sure that any noise will be detected
   4567 	 * as a CRC error and be dropped rather than show up as a bad packet
   4568 	 * to the DMA engine
   4569 	 */
   4570 	if (sc->sc_type == WM_T_PCH)
   4571 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4572 
   4573 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4574 		CSR_WRITE(sc, WMREG_WUC, 0);
   4575 
   4576 	/* XXX need special handling for 82580 */
   4577 }
   4578 
   4579 static void
   4580 wm_set_vlan(struct wm_softc *sc)
   4581 {
   4582 	/* Deal with VLAN enables. */
   4583 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4584 		sc->sc_ctrl |= CTRL_VME;
   4585 	else
   4586 		sc->sc_ctrl &= ~CTRL_VME;
   4587 
   4588 	/* Write the control registers. */
   4589 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4590 }
   4591 
   4592 /*
   4593  * wm_init:		[ifnet interface function]
   4594  *
   4595  *	Initialize the interface.
   4596  */
   4597 static int
   4598 wm_init(struct ifnet *ifp)
   4599 {
   4600 	struct wm_softc *sc = ifp->if_softc;
   4601 	int ret;
   4602 
   4603 	WM_LOCK(sc);
   4604 	ret = wm_init_locked(ifp);
   4605 	WM_UNLOCK(sc);
   4606 
   4607 	return ret;
   4608 }
   4609 
/*
 * wm_init_locked:
 *
 *	Initialize the interface with the softc lock held: stop any
 *	pending I/O, reset the chip, then (re)program the transmit and
 *	receive descriptor rings, flow control, VLAN table, checksum
 *	offload, interrupt moderation and the receive filter, and
 *	finally mark the interface as running.
 *
 *	Returns 0 on success or an errno on failure (RX buffer
 *	allocation or media change failure).
 */
static int
wm_init_locked(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, j, trynum, error = 0;
	uint32_t reg;

	KASSERT(WM_LOCKED(sc));
	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop_locked(ifp, 0);

	/* update statistics before reset */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/*
	 * On manageability-capable chips, claim hardware control from
	 * the firmware when a management mode is detected.
	 */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/* Reset the PHY. */
	if (sc->sc_flags & WM_F_HAS_MII)
		wm_gmii_reset(sc);

	reg = CSR_READ(sc, WMREG_CTRL_EXT);
	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
	    || (sc->sc_type == WM_T_PCH_LPT))
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC(sc);
	sc->sc_txnext = 0;

	/* Pre-82543 parts use the "old" descriptor base/length registers. */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */

		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
			/*
			 * Don't write TDT before TCTL.EN is set.
			 * See the document.
			 */
			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
			    | TXDCTL_WTHRESH(0));
		else {
			CSR_WRITE(sc, WMREG_TDT, 0);
			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
		}
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN(sc);
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* Second RX ring is unused; zero its registers. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
			CSR_WRITE(sc, WMREG_EITR(0), 450);
			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
			    | RXDCTL_WTHRESH(1));
		} else {
			CSR_WRITE(sc, WMREG_RDH, 0);
			CSR_WRITE(sc, WMREG_RDT, 0);
			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
		}
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				log(LOG_ERR, "%s: unable to allocate or map "
				    "rx buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else {
			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
				WM_INIT_RXDESC(sc, i);
			/*
			 * For 82575 and newer device, the RX descriptors
			 * must be initialized after the setting of RCTL.EN in
			 * wm_set_filter()
			 */
		}
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		trynum = 10; /* Due to hw errata */
	else
		trynum = 1;
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		for (j = 0; j < trynum; j++)
			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
	}

	sc->sc_fcrtl = FCRTL_DFLT;
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
	} else {
		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
	}

	if (sc->sc_type == WM_T_80003)
		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
	else
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);

	/* Writes the control register. */
	wm_set_vlan(sc);

	if (sc->sc_flags & WM_F_HAS_MII) {
		int val;

		switch (sc->sc_type) {
		case WM_T_80003:
		case WM_T_ICH8:
		case WM_T_ICH9:
		case WM_T_ICH10:
		case WM_T_PCH:
		case WM_T_PCH2:
		case WM_T_PCH_LPT:
			/*
			 * Set the mac to wait the maximum time between each
			 * iteration and increase the max iterations when
			 * polling the phy; this fixes erroneous timeouts at
			 * 10Mbps.
			 */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
			    0xFFFF);
			val = wm_kmrn_readreg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM);
			val |= 0x3F;
			wm_kmrn_writereg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
			break;
		default:
			break;
		}

		if (sc->sc_type == WM_T_80003) {
			val = CSR_READ(sc, WMREG_CTRL_EXT);
			val &= ~CTRL_EXT_LINK_MODE_MASK;
			CSR_WRITE(sc, WMREG_CTRL_EXT, val);

			/* Bypass RX and TX FIFO's */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
		}
	}
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/* Reset TBI's RXCFG count */
	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
		reg = CSR_READ(sc, WMREG_KABGTXD);
		reg |= KABGTXD_BGSQLBIAS;
		CSR_WRITE(sc, WMREG_KABGTXD, reg);
	}

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

	if (sc->sc_type >= WM_T_82543) {
		/*
		 * Set up the interrupt throttling register (units of 256ns)
		 * Note that a footnote in Intel's documentation says this
		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode.  Empirically, it appears to be the case
		 * that that is also true for the 1024ns units of the other
		 * interrupt-related timer registers -- so, really, we ought
		 * to divide this value by 4 when the link speed is low.
		 *
		 * XXX implement this division at link speed change!
		 */

		 /*
		  * For N interrupts/sec, set this value to:
		  * 1000000000 / (N * 256).  Note that we set the
		  * absolute and packet timer values to this value
		  * divided by 4 to get "simple timer" behavior.
		  */

		sc->sc_itr = 1500;		/* 2604 ints/sec */
		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
	}

	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
	    | TCTL_CT(TX_COLLISION_THRESHOLD)
	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/*
		 * Write TDT after TCTL.EN is set.
		 * See the document.
		 */
		CSR_WRITE(sc, WMREG_TDT, 0);
	}

	if (sc->sc_type == WM_T_80003) {
		reg = CSR_READ(sc, WMREG_TCTL_EXT);
		reg &= ~TCTL_EXT_GCEX_MASK;
		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
	}

	/* Set the media. */
	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
		goto out;

	/* Configure for OS presence */
	wm_init_manageability(sc);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	/*
	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not. So ask for stripped CRC here and cope in rxeof
	 */
	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210))
		sc->sc_rctl |= RCTL_SECRC;

	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
	    && (ifp->if_mtu > ETHERMTU)) {
		sc->sc_rctl |= RCTL_LPE;
		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
	}

	/* Select the RX buffer size matching the cluster size. */
	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch (MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Enable ECC */
	switch (sc->sc_type) {
	case WM_T_82571:
		reg = CSR_READ(sc, WMREG_PBA_ECC);
		reg |= PBA_ECC_CORR_EN;
		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
		break;
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_PBECCSTS);
		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
		CSR_WRITE(sc, WMREG_PBECCSTS, reg);

		reg = CSR_READ(sc, WMREG_CTRL);
		reg |= CTRL_MEHE;
		CSR_WRITE(sc, WMREG_CTRL, reg);
		break;
	default:
		break;
	}

	/* On 575 and later set RDT only if RX enabled */
	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		for (i = 0; i < WM_NRXDESC; i++)
			WM_INIT_RXDESC(sc, i);

	sc->sc_stopping = false;

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	/* Snapshot if_flags so later ioctls can detect changes. */
	sc->sc_if_flags = ifp->if_flags;
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    device_xname(sc->sc_dev));
	return error;
}
   5073 
   5074 /*
   5075  * wm_rxdrain:
   5076  *
   5077  *	Drain the receive queue.
   5078  */
   5079 static void
   5080 wm_rxdrain(struct wm_softc *sc)
   5081 {
   5082 	struct wm_rxsoft *rxs;
   5083 	int i;
   5084 
   5085 	KASSERT(WM_LOCKED(sc));
   5086 
   5087 	for (i = 0; i < WM_NRXDESC; i++) {
   5088 		rxs = &sc->sc_rxsoft[i];
   5089 		if (rxs->rxs_mbuf != NULL) {
   5090 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5091 			m_freem(rxs->rxs_mbuf);
   5092 			rxs->rxs_mbuf = NULL;
   5093 		}
   5094 	}
   5095 }
   5096 
   5097 /*
   5098  * wm_stop:		[ifnet interface function]
   5099  *
   5100  *	Stop transmission on the interface.
   5101  */
   5102 static void
   5103 wm_stop(struct ifnet *ifp, int disable)
   5104 {
   5105 	struct wm_softc *sc = ifp->if_softc;
   5106 
   5107 	WM_LOCK(sc);
   5108 	wm_stop_locked(ifp, disable);
   5109 	WM_UNLOCK(sc);
   5110 }
   5111 
/*
 * wm_stop_locked:
 *
 *	Really stop the interface; called with sc's lock held.  Halts
 *	the callouts, the MII, the TX/RX units and interrupt delivery,
 *	releases queued transmit mbufs, and marks the interface down.
 *	If 'disable' is set, the receive queue is drained as well.
 */
static void
wm_stop_locked(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	KASSERT(WM_LOCKED(sc));

	/* Tell other code paths (tick callout, interrupt) we are stopping. */
	sc->sc_stopping = true;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
#if 0
		/* Should we clear PHY's status properly? */
		wm_reset(sc);
#endif
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);
	sc->sc_rctl &= ~RCTL_EN;

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		wm_rxdrain(sc);

#if 0 /* notyet */
	if (sc->sc_type >= WM_T_82544)
		CSR_WRITE(sc, WMREG_WUC, 0);
#endif
}
   5176 
   5177 void
   5178 wm_get_auto_rd_done(struct wm_softc *sc)
   5179 {
   5180 	int i;
   5181 
   5182 	/* wait for eeprom to reload */
   5183 	switch (sc->sc_type) {
   5184 	case WM_T_82571:
   5185 	case WM_T_82572:
   5186 	case WM_T_82573:
   5187 	case WM_T_82574:
   5188 	case WM_T_82583:
   5189 	case WM_T_82575:
   5190 	case WM_T_82576:
   5191 	case WM_T_82580:
   5192 	case WM_T_82580ER:
   5193 	case WM_T_I350:
   5194 	case WM_T_I354:
   5195 	case WM_T_I210:
   5196 	case WM_T_I211:
   5197 	case WM_T_80003:
   5198 	case WM_T_ICH8:
   5199 	case WM_T_ICH9:
   5200 		for (i = 0; i < 10; i++) {
   5201 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   5202 				break;
   5203 			delay(1000);
   5204 		}
   5205 		if (i == 10) {
   5206 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   5207 			    "complete\n", device_xname(sc->sc_dev));
   5208 		}
   5209 		break;
   5210 	default:
   5211 		break;
   5212 	}
   5213 }
   5214 
/*
 * wm_lan_init_done:
 *
 *	On ICH10/PCH-family chips, poll the STATUS register until the
 *	firmware sets LAN_INIT_DONE (or the timeout expires), then
 *	acknowledge by clearing the bit.  Panics on other chip types.
 */
void
wm_lan_init_done(struct wm_softc *sc)
{
	uint32_t reg = 0;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_LAN_INIT_DONE) != 0)
				break;
			delay(100);
		}
		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
			log(LOG_ERR, "%s: %s: lan_init_done failed to "
			    "complete\n", device_xname(sc->sc_dev), __func__);
		}
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	/*
	 * Acknowledge completion by clearing LAN_INIT_DONE in the last
	 * STATUS value read (done even after a timeout).
	 */
	reg &= ~STATUS_LAN_INIT_DONE;
	CSR_WRITE(sc, WMREG_STATUS, reg);
}
   5247 
/*
 * wm_get_cfg_done:
 *
 *	Wait for the hardware to finish loading its configuration after
 *	a reset.  The strategy is chip-specific: nothing at all, a fixed
 *	delay, polling the per-port CFGDONE bit in EEMNGCTL, or the
 *	ICH/PCH auto-read / LAN-init-done handshake.  Panics on an
 *	unknown chip type.
 */
void
wm_get_cfg_done(struct wm_softc *sc)
{
	int mask;
	uint32_t reg;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* generic */
		delay(10*1000);
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_I354:
	case WM_T_I210:
	case WM_T_I211:
		if (sc->sc_type == WM_T_82571) {
			/* Only 82571 shares port 0 */
			mask = EEMNGCTL_CFGDONE_0;
		} else
			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
		/* Poll the per-function CFGDONE bit, 1ms per try. */
		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
				break;
			delay(1000);
		}
		if (i >= WM_PHY_CFG_TIMEOUT) {
			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
				device_xname(sc->sc_dev), __func__));
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		delay(10*1000);
		if (sc->sc_type >= WM_T_ICH10)
			wm_lan_init_done(sc);
		else
			wm_get_auto_rd_done(sc);

		/* Acknowledge "PHY reset asserted" (PHYRA) if it is set. */
		reg = CSR_READ(sc, WMREG_STATUS);
		if ((reg & STATUS_PHYRA) != 0)
			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}
}
   5326 
   5327 /*
   5328  * wm_acquire_eeprom:
   5329  *
   5330  *	Perform the EEPROM handshake required on some chips.
   5331  */
static int
wm_acquire_eeprom(struct wm_softc *sc)
{
	uint32_t reg;
	int x;
	int ret = 0;

	/*
	 * Acquire exclusive access to the NVM: first take whichever
	 * firmware/software semaphore the chip family requires, then
	 * (if WM_F_LOCK_EECD) perform the EECD REQ/GNT handshake.
	 * Returns 0 on success, 1 on failure.
	 */

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return 0;

	/* Exactly one of these lock styles is used per chip family. */
	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
		ret = wm_get_swfwhw_semaphore(sc);
	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
		/* this will also do wm_get_swsm_semaphore() if needed */
		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
		ret = wm_get_swsm_semaphore(sc);
	}

	if (ret) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
			__func__);
		return 1;
	}

	if (sc->sc_flags & WM_F_LOCK_EECD) {
		reg = CSR_READ(sc, WMREG_EECD);

		/* Request EEPROM access. */
		reg |= EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* ..and wait for it to be granted. */
		for (x = 0; x < 1000; x++) {
			reg = CSR_READ(sc, WMREG_EECD);
			if (reg & EECD_EE_GNT)
				break;
			delay(5);
		}
		if ((reg & EECD_EE_GNT) == 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not acquire EEPROM GNT\n");
			/* Withdraw the request before backing out. */
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
			/*
			 * NOTE(review): unlike the acquisition above (a
			 * single if/else-if chain), this error path may
			 * release both the EXTCNF and SWFW semaphores when
			 * both flags are set even though only one was taken.
			 * Presumably the lock flags are mutually exclusive
			 * in practice -- confirm before restructuring.
			 */
			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
				wm_put_swfwhw_semaphore(sc);
			if (sc->sc_flags & WM_F_LOCK_SWFW)
				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
			else if (sc->sc_flags & WM_F_LOCK_SWSM)
				wm_put_swsm_semaphore(sc);
			return 1;
		}
	}

	return 0;
}
   5389 
   5390 /*
   5391  * wm_release_eeprom:
   5392  *
   5393  *	Release the EEPROM mutex.
   5394  */
static void
wm_release_eeprom(struct wm_softc *sc)
{
	uint32_t reg;

	/*
	 * Undo wm_acquire_eeprom(): drop the EECD REQ bit first, then
	 * release whichever semaphore was taken.  Order matters -- the
	 * hardware handshake must be ended before the lock is given up.
	 */

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return;

	if (sc->sc_flags & WM_F_LOCK_EECD) {
		reg = CSR_READ(sc, WMREG_EECD);
		reg &= ~EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);
	}

	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
		wm_put_swfwhw_semaphore(sc);
	if (sc->sc_flags & WM_F_LOCK_SWFW)
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	else if (sc->sc_flags & WM_F_LOCK_SWSM)
		wm_put_swsm_semaphore(sc);
}
   5417 
   5418 /*
   5419  * wm_eeprom_sendbits:
   5420  *
   5421  *	Send a series of bits to the EEPROM.
   5422  */
static void
wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
{
	uint32_t reg;
	int x;

	/*
	 * Clock out the low 'nbits' of 'bits', MSB first, by bit-banging
	 * the EECD DI (data in) and SK (clock) lines.  Each bit is held
	 * across a full SK low-high-low cycle with 2us settle delays.
	 */
	reg = CSR_READ(sc, WMREG_EECD);

	for (x = nbits; x > 0; x--) {
		if (bits & (1U << (x - 1)))
			reg |= EECD_DI;
		else
			reg &= ~EECD_DI;
		/* Present the data bit, then pulse the clock. */
		CSR_WRITE(sc, WMREG_EECD, reg);
		CSR_WRITE_FLUSH(sc);
		delay(2);
		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
		CSR_WRITE_FLUSH(sc);
		delay(2);
		CSR_WRITE(sc, WMREG_EECD, reg);
		CSR_WRITE_FLUSH(sc);
		delay(2);
	}
}
   5447 
   5448 /*
   5449  * wm_eeprom_recvbits:
   5450  *
   5451  *	Receive a series of bits from the EEPROM.
   5452  */
static void
wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
{
	uint32_t reg, val;
	int x;

	/*
	 * Clock in 'nbits' bits, MSB first, by toggling the EECD SK line
	 * and sampling DO (data out) while the clock is high.  The result
	 * is stored in *valp.  DI is held low for the duration.
	 */
	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;

	val = 0;
	for (x = nbits; x > 0; x--) {
		/* Raise the clock, then sample the data-out line. */
		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
		CSR_WRITE_FLUSH(sc);
		delay(2);
		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
			val |= (1U << (x - 1));
		CSR_WRITE(sc, WMREG_EECD, reg);
		CSR_WRITE_FLUSH(sc);
		delay(2);
	}
	*valp = val;
}
   5474 
   5475 /*
   5476  * wm_read_eeprom_uwire:
   5477  *
   5478  *	Read a word from the EEPROM using the MicroWire protocol.
   5479  */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	/*
	 * Read 'wordcnt' 16-bit words starting at word offset 'word' via
	 * the MicroWire serial protocol: per word, assert chip select,
	 * shift out the READ opcode and address, shift in 16 data bits,
	 * then drop chip select.  Always returns 0.
	 */
	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/*
		 * XXX: workaround for a bug in qemu-0.12.x and prior
		 * and Xen.
		 *
		 * We use this workaround only for 82540 because qemu's
		 * e1000 act as 82540.
		 */
		if (sc->sc_type == WM_T_82540) {
			/* Extra dummy clock pulse before chip select. */
			reg |= EECD_SK;
			CSR_WRITE(sc, WMREG_EECD, reg);
			reg &= ~EECD_SK;
			CSR_WRITE(sc, WMREG_EECD, reg);
			CSR_WRITE_FLUSH(sc);
			delay(2);
		}
		/* XXX: end of workaround */

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		CSR_WRITE_FLUSH(sc);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		CSR_WRITE_FLUSH(sc);
		delay(2);
	}

	return 0;
}
   5533 
   5534 /*
   5535  * wm_spi_eeprom_ready:
   5536  *
   5537  *	Wait for a SPI EEPROM to be ready for commands.
   5538  */
static int
wm_spi_eeprom_ready(struct wm_softc *sc)
{
	uint32_t val;
	int usec;

	/*
	 * Poll the SPI EEPROM status register (RDSR) every 5us until the
	 * device clears its busy (RDY) bit or SPI_MAX_RETRIES microseconds
	 * have elapsed.  Returns 0 when ready, 1 on timeout.
	 */
	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
		wm_eeprom_recvbits(sc, &val, 8);
		/* RDY clear means the device can accept a command. */
		if ((val & SPI_SR_RDY) == 0)
			break;
	}
	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
		return 1;
	}
	return 0;
}
   5557 
   5558 /*
   5559  * wm_read_eeprom_spi:
   5560  *
 *	Read a word from the EEPROM using the SPI protocol.
   5562  */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/*
	 * Read 'wordcnt' 16-bit words starting at word offset 'word' via
	 * the SPI serial protocol.  A single READ command is issued and the
	 * words are streamed out sequentially.  Returns 0 on success, 1 if
	 * the device never became ready.
	 */

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	CSR_WRITE_FLUSH(sc);
	delay(2);

	if (wm_spi_eeprom_ready(sc))
		return 1;

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	CSR_WRITE_FLUSH(sc);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	CSR_WRITE_FLUSH(sc);
	delay(2);

	/*
	 * Small (8-bit-address) parts encode the 9th address bit in the
	 * opcode (A8) for offsets at or beyond word 128.
	 */
	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	/* SPI addresses are byte-based; word offset << 1 is the byte addr. */
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
		/* Data arrives big-endian; swap to host 16-bit order. */
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	CSR_WRITE_FLUSH(sc);
	delay(2);

	return 0;
}
   5607 
   5608 #define NVM_CHECKSUM			0xBABA
   5609 #define EEPROM_SIZE			0x0040
   5610 #define NVM_COMPAT			0x0003
   5611 #define NVM_COMPAT_VALID_CHECKSUM	0x0001
   5612 #define NVM_FUTURE_INIT_WORD1			0x0019
   5613 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM	0x0040
   5614 
   5615 /*
   5616  * wm_validate_eeprom_checksum
   5617  *
   5618  * The checksum is defined as the sum of the first 64 (16 bit) words.
   5619  */
static int
wm_validate_eeprom_checksum(struct wm_softc *sc)
{
	uint16_t checksum;
	uint16_t eeprom_data;
#ifdef WM_DEBUG
	uint16_t csum_wordaddr, valid_checksum;
#endif
	int i;

	/*
	 * Sum the first EEPROM_SIZE (64) words and compare against the
	 * expected NVM_CHECKSUM value.  Returns 1 only if a word could not
	 * be read; a checksum MISMATCH is merely logged (under WM_DEBUG)
	 * and still returns 0.
	 */
	checksum = 0;

	/* Don't check for I211 */
	if (sc->sc_type == WM_T_I211)
		return 0;

#ifdef WM_DEBUG
	/* Pick the word that carries the "checksum valid" flag. */
	if (sc->sc_type == WM_T_PCH_LPT) {
		csum_wordaddr = NVM_COMPAT;
		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
	} else {
		csum_wordaddr = NVM_FUTURE_INIT_WORD1;
		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
	}

	/* Dump EEPROM image for debug */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
		wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
		if ((eeprom_data & valid_checksum) == 0) {
			DPRINTF(WM_DEBUG_NVM,
			    ("%s: NVM need to be updated (%04x != %04x)\n",
				device_xname(sc->sc_dev), eeprom_data,
				    valid_checksum));
		}
	}

	if ((wm_debug & WM_DEBUG_NVM) != 0) {
		/* Hex dump, 8 words per row; "XX" marks an unreadable word. */
		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
		for (i = 0; i < EEPROM_SIZE; i++) {
			if (wm_read_eeprom(sc, i, 1, &eeprom_data))
				printf("XX ");
			else
				printf("%04x ", eeprom_data);
			if (i % 8 == 7)
				printf("\n");
		}
	}

#endif /* WM_DEBUG */

	for (i = 0; i < EEPROM_SIZE; i++) {
		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
			return 1;
		checksum += eeprom_data;
	}

	/* Mismatch is non-fatal: log it (debug builds) and carry on. */
	if (checksum != (uint16_t) NVM_CHECKSUM) {
#ifdef WM_DEBUG
		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
#endif
	}

	return 0;
}
   5687 
   5688 /*
   5689  * wm_read_eeprom:
   5690  *
   5691  *	Read data from the serial EEPROM.
   5692  */
   5693 static int
   5694 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   5695 {
   5696 	int rv;
   5697 
   5698 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   5699 		return 1;
   5700 
   5701 	if (wm_acquire_eeprom(sc))
   5702 		return 1;
   5703 
   5704 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5705 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5706 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   5707 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
   5708 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   5709 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
   5710 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   5711 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
   5712 	else
   5713 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
   5714 
   5715 	wm_release_eeprom(sc);
   5716 	return rv;
   5717 }
   5718 
   5719 static int
   5720 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
   5721     uint16_t *data)
   5722 {
   5723 	int i, eerd = 0;
   5724 	int error = 0;
   5725 
   5726 	for (i = 0; i < wordcnt; i++) {
   5727 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   5728 
   5729 		CSR_WRITE(sc, WMREG_EERD, eerd);
   5730 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   5731 		if (error != 0)
   5732 			break;
   5733 
   5734 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   5735 	}
   5736 
   5737 	return error;
   5738 }
   5739 
   5740 static int
   5741 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   5742 {
   5743 	uint32_t attempts = 100000;
   5744 	uint32_t i, reg = 0;
   5745 	int32_t done = -1;
   5746 
   5747 	for (i = 0; i < attempts; i++) {
   5748 		reg = CSR_READ(sc, rw);
   5749 
   5750 		if (reg & EERD_DONE) {
   5751 			done = 0;
   5752 			break;
   5753 		}
   5754 		delay(5);
   5755 	}
   5756 
   5757 	return done;
   5758 }
   5759 
   5760 static int
   5761 wm_check_alt_mac_addr(struct wm_softc *sc)
   5762 {
   5763 	uint16_t myea[ETHER_ADDR_LEN / 2];
   5764 	uint16_t offset = EEPROM_OFF_MACADDR;
   5765 
   5766 	/* Try to read alternative MAC address pointer */
   5767 	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   5768 		return -1;
   5769 
   5770 	/* Check pointer */
   5771 	if (offset == 0xffff)
   5772 		return -1;
   5773 
   5774 	/*
   5775 	 * Check whether alternative MAC address is valid or not.
   5776 	 * Some cards have non 0xffff pointer but those don't use
   5777 	 * alternative MAC address in reality.
   5778 	 *
   5779 	 * Check whether the broadcast bit is set or not.
   5780 	 */
   5781 	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
   5782 		if (((myea[0] & 0xff) & 0x01) == 0)
   5783 			return 0; /* found! */
   5784 
   5785 	/* not found */
   5786 	return -1;
   5787 }
   5788 
static int
wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
{
	uint16_t myea[ETHER_ADDR_LEN / 2];
	uint16_t offset = EEPROM_OFF_MACADDR;
	int do_invert = 0;

	/*
	 * Fetch this port's MAC address from the NVM into enaddr[6].
	 * Multi-port chips store per-port addresses at different NVM
	 * offsets (or derive them by flipping the low bit); pick the
	 * right offset based on chip type and PCI function id.
	 * Returns 0 on success, -1 on failure.
	 */
	switch (sc->sc_type) {
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_I354:
		/* These chips have a fixed per-port LAN offset table. */
		switch (sc->sc_funcid) {
		case 0:
			/* default value (== EEPROM_OFF_MACADDR) */
			break;
		case 1:
			offset = EEPROM_OFF_LAN1;
			break;
		case 2:
			offset = EEPROM_OFF_LAN2;
			break;
		case 3:
			offset = EEPROM_OFF_LAN3;
			break;
		default:
			goto bad;
			/* NOTREACHED */
			break;
		}
		break;
	case WM_T_82571:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_80003:
	case WM_T_I210:
	case WM_T_I211:
		/* These may carry an "alternative MAC address" block. */
		if (wm_check_alt_mac_addr(sc) != 0) {
			/* reset the offset to LAN0 */
			offset = EEPROM_OFF_MACADDR;
			/* Odd-numbered functions toggle the address LSB. */
			if ((sc->sc_funcid & 0x01) == 1)
				do_invert = 1;
			goto do_read;
		}
		/* offset now holds the alt-MAC pointer read by the check. */
		switch (sc->sc_funcid) {
		case 0:
			/*
			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
			 * itself.
			 */
			break;
		case 1:
			offset += EEPROM_OFF_MACADDR_LAN1;
			break;
		case 2:
			offset += EEPROM_OFF_MACADDR_LAN2;
			break;
		case 3:
			offset += EEPROM_OFF_MACADDR_LAN3;
			break;
		default:
			goto bad;
			/* NOTREACHED */
			break;
		}
		break;
	default:
		/* Single stored address; second port flips the LSB. */
		if ((sc->sc_funcid & 0x01) == 1)
			do_invert = 1;
		break;
	}

 do_read:
	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
		myea) != 0) {
		goto bad;
	}

	/* NVM words are little-endian byte pairs. */
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of some dual port cards.
	 */
	if (do_invert != 0)
		enaddr[5] ^= 1;

	return 0;

 bad:
	return -1;
}
   5886 
   5887 /*
   5888  * wm_add_rxbuf:
   5889  *
 *	Add a receive buffer to the indicated descriptor.
   5891  */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/*
	 * Attach a fresh mbuf cluster to receive descriptor 'idx' and DMA-map
	 * it.  Returns 0 on success, ENOBUFS if no mbuf/cluster is available.
	 * Must be called with the softc lock held.
	 */
	KASSERT(WM_LOCKED(sc));

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/* Drop the old mapping (the old mbuf was consumed by the caller). */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * On newer-queue chips the descriptor is only (re)initialized while
	 * the receiver is enabled; older chips always initialize it here.
	 */
	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		if ((sc->sc_rctl & RCTL_EN) != 0)
			WM_INIT_RXDESC(sc, idx);
	} else
		WM_INIT_RXDESC(sc, idx);

	return 0;
}
   5938 
   5939 /*
   5940  * wm_set_ral:
   5941  *
 *	Set an entry in the receive address list.
   5943  */
   5944 static void
   5945 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   5946 {
   5947 	uint32_t ral_lo, ral_hi;
   5948 
   5949 	if (enaddr != NULL) {
   5950 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   5951 		    (enaddr[3] << 24);
   5952 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   5953 		ral_hi |= RAL_AV;
   5954 	} else {
   5955 		ral_lo = 0;
   5956 		ral_hi = 0;
   5957 	}
   5958 
   5959 	if (sc->sc_type >= WM_T_82544) {
   5960 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   5961 		    ral_lo);
   5962 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   5963 		    ral_hi);
   5964 	} else {
   5965 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   5966 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   5967 	}
   5968 }
   5969 
   5970 /*
   5971  * wm_mchash:
   5972  *
   5973  *	Compute the hash of the multicast address for the 4096-bit
   5974  *	multicast filter.
   5975  */
   5976 static uint32_t
   5977 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   5978 {
   5979 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   5980 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   5981 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   5982 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   5983 	uint32_t hash;
   5984 
   5985 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5986 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5987 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   5988 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   5989 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   5990 		return (hash & 0x3ff);
   5991 	}
   5992 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   5993 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   5994 
   5995 	return (hash & 0xfff);
   5996 }
   5997 
   5998 /*
   5999  * wm_set_filter:
   6000  *
   6001  *	Set up the receive filter.
   6002  */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	/*
	 * Program the receive filter: RCTL broadcast/promiscuous bits, the
	 * receive address list (station address in slot 0), and the
	 * multicast hash table.  Falls back to "accept all multicast"
	 * (ALLMULTI) for promiscuous mode or multicast address ranges.
	 */

	/* 82544 and later moved the multicast table address. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 -1;
	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
	    || (sc->sc_type == WM_T_PCH_LPT))
		size = WM_RAL_TABSIZE_ICH8;
	else if (sc->sc_type == WM_T_82575)
		size = WM_RAL_TABSIZE_82575;
	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
		size = WM_RAL_TABSIZE_82576;
	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		size = WM_RAL_TABSIZE_I350;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	/* ICH/PCH parts have a smaller multicast table. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Split the hash into a table-word index and a bit index. */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		    || (sc->sc_type == WM_T_PCH2)
		    || (sc->sc_type == WM_T_PCH_LPT))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/* XXX Hardware bug?? */
		/*
		 * On 82544, writing certain MTA words clobbers the previous
		 * word, so re-write the neighbor after updating this one.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
   6110 
   6111 /*
   6112  * wm_tbi_mediainit:
   6113  *
   6114  *	Initialize media for use on 1000BASE-X devices.
   6115  */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const char *sep = "";

	/*
	 * Set up ifmedia for fiber (1000BASE-X) parts: transmit IPG,
	 * SWD pin directions, and the supported media list
	 * (1000baseSX, 1000baseSX-FDX, auto).
	 */

	/* Pre-82543 parts use a different transmit IPG default. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	/* Link-timer ticks between autonegotiation retries. */
	sc->sc_tbi_anegticks = 5;

	/* Initialize our media structures */
	sc->sc_mii.mii_ifp = ifp;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
	/* SERDES parts must not be held in link reset. */
	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
		sc->sc_ctrl &= ~CTRL_LRST;

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Add one media entry and append its name to the autoconf banner. */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal_dev(sc->sc_dev, "");
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
}
   6166 
   6167 /*
   6168  * wm_tbi_mediastatus:	[ifmedia interface function]
   6169  *
   6170  *	Get the current interface media status on a 1000BASE-X device.
   6171  */
   6172 static void
   6173 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   6174 {
   6175 	struct wm_softc *sc = ifp->if_softc;
   6176 	uint32_t ctrl, status;
   6177 
   6178 	ifmr->ifm_status = IFM_AVALID;
   6179 	ifmr->ifm_active = IFM_ETHER;
   6180 
   6181 	status = CSR_READ(sc, WMREG_STATUS);
   6182 	if ((status & STATUS_LU) == 0) {
   6183 		ifmr->ifm_active |= IFM_NONE;
   6184 		return;
   6185 	}
   6186 
   6187 	ifmr->ifm_status |= IFM_ACTIVE;
   6188 	ifmr->ifm_active |= IFM_1000_SX;
   6189 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   6190 		ifmr->ifm_active |= IFM_FDX;
   6191 	else
   6192 		ifmr->ifm_active |= IFM_HDX;
   6193 	ctrl = CSR_READ(sc, WMREG_CTRL);
   6194 	if (ctrl & CTRL_RFCE)
   6195 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   6196 	if (ctrl & CTRL_TFCE)
   6197 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   6198 }
   6199 
   6200 /*
   6201  * wm_tbi_mediachange:	[ifmedia interface function]
   6202  *
   6203  *	Set hardware to newly-selected media on a 1000BASE-X device.
   6204  */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/*
	 * Apply the newly selected media on a 1000BASE-X device: build the
	 * transmit config word (TXCW), optionally force link/duplex when
	 * autonegotiation is off, then wait for link and sync the TX
	 * collision distance and flow-control settings to the result.
	 * Always returns 0.
	 */

	/* SERDES parts are not handled here. */
	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
		return 0;

	sc->sc_txcw = 0;
	/* Advertise pause capability for autoneg or explicit flow control. */
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*
		 * If autonegotiation is turned off, force link up and turn on
		 * full duplex
		 */
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
		    device_xname(sc->sc_dev),sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	CSR_WRITE_FLUSH(sc);
	delay(10000);

	/* Sample the Loss Of Signal pin. */
	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			CSR_WRITE_FLUSH(sc);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			CSR_WRITE_FLUSH(sc);
			delay(1000);
		}

		/* Poll for link-up, 10ms per iteration. */
		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			    device_xname(sc->sc_dev),i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev),status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));

			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			/* Rebuild collision distance / XON for the duplex. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			if (i == WM_LINKUP_TIMEOUT)
				wm_check_for_link(sc);
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return 0;
}
   6324 
   6325 /*
   6326  * wm_tbi_set_linkled:
   6327  *
   6328  *	Update the link LED on 1000BASE-X devices.
   6329  */
   6330 static void
   6331 wm_tbi_set_linkled(struct wm_softc *sc)
   6332 {
   6333 
   6334 	if (sc->sc_tbi_linkup)
   6335 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   6336 	else
   6337 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   6338 
   6339 	/* 82540 or newer devices are active low */
   6340 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   6341 
   6342 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6343 }
   6344 
   6345 /*
   6346  * wm_tbi_check_link:
   6347  *
   6348  *	Check the link on 1000BASE-X devices.
   6349  */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;

	/*
	 * Periodic link monitor for 1000BASE-X: update sc_tbi_linkup from
	 * STATUS, recover from RXCFG storms by reinitializing, and retry
	 * autonegotiation after the aneg timer expires while the link is
	 * down.  Must be called with the softc lock held.
	 */
	KASSERT(WM_LOCKED(sc));

	/* SERDES parts are assumed up; nothing to monitor here. */
	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
		sc->sc_tbi_linkup = 1;
		return;
	}

	status = CSR_READ(sc, WMREG_STATUS);

	/* XXX is this needed? */
	(void)CSR_READ(sc, WMREG_RXCW);
	(void)CSR_READ(sc, WMREG_CTRL);

	/* set link status */
	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	} else if (sc->sc_tbi_linkup == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tbi_linkup = 1;
	}

	/* Link-down recovery, only while the interface is up. */
	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
	    && ((status & STATUS_LU) == 0)) {
		sc->sc_tbi_linkup = 0;
		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
			/* RXCFG storm! */
			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
			wm_init_locked(ifp);
			/* Drop the lock around if_start; it may re-enter. */
			WM_UNLOCK(sc);
			ifp->if_start(ifp);
			WM_LOCK(sc);
		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/* If the timer expired, retry autonegotiation */
			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
				sc->sc_tbi_ticks = 0;
				/*
				 * Reset the link, and let autonegotiation do
				 * its thing
				 */
				sc->sc_ctrl |= CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				CSR_WRITE_FLUSH(sc);
				delay(1000);
				sc->sc_ctrl &= ~CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				CSR_WRITE_FLUSH(sc);
				delay(1000);
				/* Restart autoneg by toggling TXCW_ANE. */
				CSR_WRITE(sc, WMREG_TXCW,
				    sc->sc_txcw & ~TXCW_ANE);
				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
			}
		}
	}

	wm_tbi_set_linkled(sc);
}
   6419 
   6420 /*
   6421  * wm_gmii_reset:
   6422  *
   6423  *	Reset the PHY.
   6424  */
   6425 static void
   6426 wm_gmii_reset(struct wm_softc *sc)
   6427 {
   6428 	uint32_t reg;
   6429 	int rv;
   6430 
   6431 	/* get phy semaphore */
   6432 	switch (sc->sc_type) {
   6433 	case WM_T_82571:
   6434 	case WM_T_82572:
   6435 	case WM_T_82573:
   6436 	case WM_T_82574:
   6437 	case WM_T_82583:
   6438 		 /* XXX should get sw semaphore, too */
   6439 		rv = wm_get_swsm_semaphore(sc);
   6440 		break;
   6441 	case WM_T_82575:
   6442 	case WM_T_82576:
   6443 	case WM_T_82580:
   6444 	case WM_T_82580ER:
   6445 	case WM_T_I350:
   6446 	case WM_T_I354:
   6447 	case WM_T_I210:
   6448 	case WM_T_I211:
   6449 	case WM_T_80003:
   6450 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   6451 		break;
   6452 	case WM_T_ICH8:
   6453 	case WM_T_ICH9:
   6454 	case WM_T_ICH10:
   6455 	case WM_T_PCH:
   6456 	case WM_T_PCH2:
   6457 	case WM_T_PCH_LPT:
   6458 		rv = wm_get_swfwhw_semaphore(sc);
   6459 		break;
   6460 	default:
   6461 		/* nothing to do*/
   6462 		rv = 0;
   6463 		break;
   6464 	}
   6465 	if (rv != 0) {
   6466 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6467 		    __func__);
   6468 		return;
   6469 	}
   6470 
   6471 	switch (sc->sc_type) {
   6472 	case WM_T_82542_2_0:
   6473 	case WM_T_82542_2_1:
   6474 		/* null */
   6475 		break;
   6476 	case WM_T_82543:
   6477 		/*
   6478 		 * With 82543, we need to force speed and duplex on the MAC
   6479 		 * equal to what the PHY speed and duplex configuration is.
   6480 		 * In addition, we need to perform a hardware reset on the PHY
   6481 		 * to take it out of reset.
   6482 		 */
   6483 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   6484 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6485 
   6486 		/* The PHY reset pin is active-low. */
   6487 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6488 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   6489 		    CTRL_EXT_SWDPIN(4));
   6490 		reg |= CTRL_EXT_SWDPIO(4);
   6491 
   6492 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6493 		CSR_WRITE_FLUSH(sc);
   6494 		delay(10*1000);
   6495 
   6496 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   6497 		CSR_WRITE_FLUSH(sc);
   6498 		delay(150);
   6499 #if 0
   6500 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   6501 #endif
   6502 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   6503 		break;
   6504 	case WM_T_82544:	/* reset 10000us */
   6505 	case WM_T_82540:
   6506 	case WM_T_82545:
   6507 	case WM_T_82545_3:
   6508 	case WM_T_82546:
   6509 	case WM_T_82546_3:
   6510 	case WM_T_82541:
   6511 	case WM_T_82541_2:
   6512 	case WM_T_82547:
   6513 	case WM_T_82547_2:
   6514 	case WM_T_82571:	/* reset 100us */
   6515 	case WM_T_82572:
   6516 	case WM_T_82573:
   6517 	case WM_T_82574:
   6518 	case WM_T_82575:
   6519 	case WM_T_82576:
   6520 	case WM_T_82580:
   6521 	case WM_T_82580ER:
   6522 	case WM_T_I350:
   6523 	case WM_T_I354:
   6524 	case WM_T_I210:
   6525 	case WM_T_I211:
   6526 	case WM_T_82583:
   6527 	case WM_T_80003:
   6528 		/* generic reset */
   6529 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   6530 		CSR_WRITE_FLUSH(sc);
   6531 		delay(20000);
   6532 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6533 		CSR_WRITE_FLUSH(sc);
   6534 		delay(20000);
   6535 
   6536 		if ((sc->sc_type == WM_T_82541)
   6537 		    || (sc->sc_type == WM_T_82541_2)
   6538 		    || (sc->sc_type == WM_T_82547)
   6539 		    || (sc->sc_type == WM_T_82547_2)) {
   6540 			/* workaround for igp are done in igp_reset() */
   6541 			/* XXX add code to set LED after phy reset */
   6542 		}
   6543 		break;
   6544 	case WM_T_ICH8:
   6545 	case WM_T_ICH9:
   6546 	case WM_T_ICH10:
   6547 	case WM_T_PCH:
   6548 	case WM_T_PCH2:
   6549 	case WM_T_PCH_LPT:
   6550 		/* generic reset */
   6551 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   6552 		CSR_WRITE_FLUSH(sc);
   6553 		delay(100);
   6554 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6555 		CSR_WRITE_FLUSH(sc);
   6556 		delay(150);
   6557 		break;
   6558 	default:
   6559 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   6560 		    __func__);
   6561 		break;
   6562 	}
   6563 
   6564 	/* release PHY semaphore */
   6565 	switch (sc->sc_type) {
   6566 	case WM_T_82571:
   6567 	case WM_T_82572:
   6568 	case WM_T_82573:
   6569 	case WM_T_82574:
   6570 	case WM_T_82583:
   6571 		 /* XXX should put sw semaphore, too */
   6572 		wm_put_swsm_semaphore(sc);
   6573 		break;
   6574 	case WM_T_82575:
   6575 	case WM_T_82576:
   6576 	case WM_T_82580:
   6577 	case WM_T_82580ER:
   6578 	case WM_T_I350:
   6579 	case WM_T_I354:
   6580 	case WM_T_I210:
   6581 	case WM_T_I211:
   6582 	case WM_T_80003:
   6583 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   6584 		break;
   6585 	case WM_T_ICH8:
   6586 	case WM_T_ICH9:
   6587 	case WM_T_ICH10:
   6588 	case WM_T_PCH:
   6589 	case WM_T_PCH2:
   6590 	case WM_T_PCH_LPT:
   6591 		wm_put_swfwhw_semaphore(sc);
   6592 		break;
   6593 	default:
   6594 		/* nothing to do*/
   6595 		rv = 0;
   6596 		break;
   6597 	}
   6598 
   6599 	/* get_cfg_done */
   6600 	wm_get_cfg_done(sc);
   6601 
   6602 	/* extra setup */
   6603 	switch (sc->sc_type) {
   6604 	case WM_T_82542_2_0:
   6605 	case WM_T_82542_2_1:
   6606 	case WM_T_82543:
   6607 	case WM_T_82544:
   6608 	case WM_T_82540:
   6609 	case WM_T_82545:
   6610 	case WM_T_82545_3:
   6611 	case WM_T_82546:
   6612 	case WM_T_82546_3:
   6613 	case WM_T_82541_2:
   6614 	case WM_T_82547_2:
   6615 	case WM_T_82571:
   6616 	case WM_T_82572:
   6617 	case WM_T_82573:
   6618 	case WM_T_82574:
   6619 	case WM_T_82575:
   6620 	case WM_T_82576:
   6621 	case WM_T_82580:
   6622 	case WM_T_82580ER:
   6623 	case WM_T_I350:
   6624 	case WM_T_I354:
   6625 	case WM_T_I210:
   6626 	case WM_T_I211:
   6627 	case WM_T_82583:
   6628 	case WM_T_80003:
   6629 		/* null */
   6630 		break;
   6631 	case WM_T_82541:
   6632 	case WM_T_82547:
   6633 		/* XXX Configure actively LED after PHY reset */
   6634 		break;
   6635 	case WM_T_ICH8:
   6636 	case WM_T_ICH9:
   6637 	case WM_T_ICH10:
   6638 	case WM_T_PCH:
   6639 	case WM_T_PCH2:
   6640 	case WM_T_PCH_LPT:
   6641 		/* Allow time for h/w to get to a quiescent state afer reset */
   6642 		delay(10*1000);
   6643 
   6644 		if (sc->sc_type == WM_T_PCH)
   6645 			wm_hv_phy_workaround_ich8lan(sc);
   6646 
   6647 		if (sc->sc_type == WM_T_PCH2)
   6648 			wm_lv_phy_workaround_ich8lan(sc);
   6649 
   6650 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   6651 			/*
   6652 			 * dummy read to clear the phy wakeup bit after lcd
   6653 			 * reset
   6654 			 */
   6655 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   6656 		}
   6657 
   6658 		/*
   6659 		 * XXX Configure the LCD with th extended configuration region
   6660 		 * in NVM
   6661 		 */
   6662 
   6663 		/* Configure the LCD with the OEM bits in NVM */
   6664 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   6665 		    || (sc->sc_type == WM_T_PCH_LPT)) {
   6666 			/*
   6667 			 * Disable LPLU.
   6668 			 * XXX It seems that 82567 has LPLU, too.
   6669 			 */
   6670 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   6671 			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
   6672 			reg |= HV_OEM_BITS_ANEGNOW;
   6673 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   6674 		}
   6675 		break;
   6676 	default:
   6677 		panic("%s: unknown type\n", __func__);
   6678 		break;
   6679 	}
   6680 }
   6681 
   6682 /*
   6683  * wm_get_phy_id_82575:
   6684  *
   6685  * Return PHY ID. Return -1 if it failed.
   6686  */
   6687 static int
   6688 wm_get_phy_id_82575(struct wm_softc *sc)
   6689 {
   6690 	uint32_t reg;
   6691 	int phyid = -1;
   6692 
   6693 	/* XXX */
   6694 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   6695 		return -1;
   6696 
   6697 	if (wm_sgmii_uses_mdio(sc)) {
   6698 		switch (sc->sc_type) {
   6699 		case WM_T_82575:
   6700 		case WM_T_82576:
   6701 			reg = CSR_READ(sc, WMREG_MDIC);
   6702 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   6703 			break;
   6704 		case WM_T_82580:
   6705 		case WM_T_I350:
   6706 		case WM_T_I354:
   6707 		case WM_T_I210:
   6708 		case WM_T_I211:
   6709 			reg = CSR_READ(sc, WMREG_MDICNFG);
   6710 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   6711 			break;
   6712 		default:
   6713 			return -1;
   6714 		}
   6715 	}
   6716 
   6717 	return phyid;
   6718 }
   6719 
   6720 
   6721 /*
   6722  * wm_gmii_mediainit:
   6723  *
   6724  *	Initialize media for use on 1000BASE-T devices.
   6725  */
   6726 static void
   6727 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   6728 {
   6729 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6730 	struct mii_data *mii = &sc->sc_mii;
   6731 
   6732 	/* We have MII. */
   6733 	sc->sc_flags |= WM_F_HAS_MII;
   6734 
   6735 	if (sc->sc_type == WM_T_80003)
   6736 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   6737 	else
   6738 		sc->sc_tipg = TIPG_1000T_DFLT;
   6739 
   6740 	/*
   6741 	 * Let the chip set speed/duplex on its own based on
   6742 	 * signals from the PHY.
   6743 	 * XXXbouyer - I'm not sure this is right for the 80003,
   6744 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   6745 	 */
   6746 	sc->sc_ctrl |= CTRL_SLU;
   6747 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6748 
   6749 	/* Initialize our media structures and probe the GMII. */
   6750 	mii->mii_ifp = ifp;
   6751 
   6752 	/*
   6753 	 * Determine the PHY access method.
   6754 	 *
   6755 	 *  For SGMII, use SGMII specific method.
   6756 	 *
   6757 	 *  For some devices, we can determine the PHY access method
   6758 	 * from sc_type.
   6759 	 *
   6760 	 *  For ICH8 variants, it's difficult to detemine the PHY access
   6761 	 * method by sc_type, so use the PCI product ID for some devices.
   6762 	 * For other ICH8 variants, try to use igp's method. If the PHY
   6763 	 * can't detect, then use bm's method.
   6764 	 */
   6765 	switch (prodid) {
   6766 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   6767 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   6768 		/* 82577 */
   6769 		sc->sc_phytype = WMPHY_82577;
   6770 		mii->mii_readreg = wm_gmii_hv_readreg;
   6771 		mii->mii_writereg = wm_gmii_hv_writereg;
   6772 		break;
   6773 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   6774 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   6775 		/* 82578 */
   6776 		sc->sc_phytype = WMPHY_82578;
   6777 		mii->mii_readreg = wm_gmii_hv_readreg;
   6778 		mii->mii_writereg = wm_gmii_hv_writereg;
   6779 		break;
   6780 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   6781 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   6782 		/* 82579 */
   6783 		sc->sc_phytype = WMPHY_82579;
   6784 		mii->mii_readreg = wm_gmii_hv_readreg;
   6785 		mii->mii_writereg = wm_gmii_hv_writereg;
   6786 		break;
   6787 	case PCI_PRODUCT_INTEL_I217_LM:
   6788 	case PCI_PRODUCT_INTEL_I217_V:
   6789 	case PCI_PRODUCT_INTEL_I218_LM:
   6790 	case PCI_PRODUCT_INTEL_I218_V:
   6791 		/* I21[78] */
   6792 		mii->mii_readreg = wm_gmii_hv_readreg;
   6793 		mii->mii_writereg = wm_gmii_hv_writereg;
   6794 		break;
   6795 	case PCI_PRODUCT_INTEL_82801I_BM:
   6796 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   6797 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   6798 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   6799 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   6800 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   6801 		/* 82567 */
   6802 		sc->sc_phytype = WMPHY_BM;
   6803 		mii->mii_readreg = wm_gmii_bm_readreg;
   6804 		mii->mii_writereg = wm_gmii_bm_writereg;
   6805 		break;
   6806 	default:
   6807 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   6808 		    && !wm_sgmii_uses_mdio(sc)){
   6809 			mii->mii_readreg = wm_sgmii_readreg;
   6810 			mii->mii_writereg = wm_sgmii_writereg;
   6811 		} else if (sc->sc_type >= WM_T_80003) {
   6812 			mii->mii_readreg = wm_gmii_i80003_readreg;
   6813 			mii->mii_writereg = wm_gmii_i80003_writereg;
   6814 		} else if (sc->sc_type >= WM_T_I210) {
   6815 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6816 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6817 		} else if (sc->sc_type >= WM_T_82580) {
   6818 			sc->sc_phytype = WMPHY_82580;
   6819 			mii->mii_readreg = wm_gmii_82580_readreg;
   6820 			mii->mii_writereg = wm_gmii_82580_writereg;
   6821 		} else if (sc->sc_type >= WM_T_82544) {
   6822 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6823 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6824 		} else {
   6825 			mii->mii_readreg = wm_gmii_i82543_readreg;
   6826 			mii->mii_writereg = wm_gmii_i82543_writereg;
   6827 		}
   6828 		break;
   6829 	}
   6830 	mii->mii_statchg = wm_gmii_statchg;
   6831 
   6832 	wm_gmii_reset(sc);
   6833 
   6834 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   6835 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   6836 	    wm_gmii_mediastatus);
   6837 
   6838 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   6839 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   6840 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6841 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   6842 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   6843 			/* Attach only one port */
   6844 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   6845 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6846 		} else {
   6847 			int i, id;
   6848 			uint32_t ctrl_ext;
   6849 
   6850 			id = wm_get_phy_id_82575(sc);
   6851 			if (id != -1) {
   6852 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   6853 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   6854 			}
   6855 			if ((id == -1)
   6856 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6857 				/* Power on sgmii phy if it is disabled */
   6858 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   6859 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   6860 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   6861 				CSR_WRITE_FLUSH(sc);
   6862 				delay(300*1000); /* XXX too long */
   6863 
   6864 				/* from 1 to 8 */
   6865 				for (i = 1; i < 8; i++)
   6866 					mii_attach(sc->sc_dev, &sc->sc_mii,
   6867 					    0xffffffff, i, MII_OFFSET_ANY,
   6868 					    MIIF_DOPAUSE);
   6869 
   6870 				/* restore previous sfp cage power state */
   6871 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   6872 			}
   6873 		}
   6874 	} else {
   6875 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6876 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6877 	}
   6878 
   6879 	/*
   6880 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   6881 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   6882 	 */
   6883 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   6884 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6885 		wm_set_mdio_slow_mode_hv(sc);
   6886 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6887 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6888 	}
   6889 
   6890 	/*
   6891 	 * (For ICH8 variants)
   6892 	 * If PHY detection failed, use BM's r/w function and retry.
   6893 	 */
   6894 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   6895 		/* if failed, retry with *_bm_* */
   6896 		mii->mii_readreg = wm_gmii_bm_readreg;
   6897 		mii->mii_writereg = wm_gmii_bm_writereg;
   6898 
   6899 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6900 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6901 	}
   6902 
   6903 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   6904 		/* Any PHY wasn't find */
   6905 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   6906 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   6907 		sc->sc_phytype = WMPHY_NONE;
   6908 	} else {
   6909 		/*
   6910 		 * PHY Found!
   6911 		 * Check PHY type.
   6912 		 */
   6913 		uint32_t model;
   6914 		struct mii_softc *child;
   6915 
   6916 		child = LIST_FIRST(&mii->mii_phys);
   6917 		if (device_is_a(child->mii_dev, "igphy")) {
   6918 			struct igphy_softc *isc = (struct igphy_softc *)child;
   6919 
   6920 			model = isc->sc_mii.mii_mpd_model;
   6921 			if (model == MII_MODEL_yyINTEL_I82566)
   6922 				sc->sc_phytype = WMPHY_IGP_3;
   6923 		}
   6924 
   6925 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   6926 	}
   6927 }
   6928 
   6929 /*
   6930  * wm_gmii_mediastatus:	[ifmedia interface function]
   6931  *
   6932  *	Get the current interface media status on a 1000BASE-T device.
   6933  */
   6934 static void
   6935 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   6936 {
   6937 	struct wm_softc *sc = ifp->if_softc;
   6938 
   6939 	ether_mediastatus(ifp, ifmr);
   6940 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   6941 	    | sc->sc_flowflags;
   6942 }
   6943 
   6944 /*
   6945  * wm_gmii_mediachange:	[ifmedia interface function]
   6946  *
   6947  *	Set hardware to newly-selected media on a 1000BASE-T device.
   6948  */
   6949 static int
   6950 wm_gmii_mediachange(struct ifnet *ifp)
   6951 {
   6952 	struct wm_softc *sc = ifp->if_softc;
   6953 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6954 	int rc;
   6955 
   6956 	if ((ifp->if_flags & IFF_UP) == 0)
   6957 		return 0;
   6958 
   6959 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   6960 	sc->sc_ctrl |= CTRL_SLU;
   6961 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   6962 	    || (sc->sc_type > WM_T_82543)) {
   6963 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   6964 	} else {
   6965 		sc->sc_ctrl &= ~CTRL_ASDE;
   6966 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   6967 		if (ife->ifm_media & IFM_FDX)
   6968 			sc->sc_ctrl |= CTRL_FD;
   6969 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   6970 		case IFM_10_T:
   6971 			sc->sc_ctrl |= CTRL_SPEED_10;
   6972 			break;
   6973 		case IFM_100_TX:
   6974 			sc->sc_ctrl |= CTRL_SPEED_100;
   6975 			break;
   6976 		case IFM_1000_T:
   6977 			sc->sc_ctrl |= CTRL_SPEED_1000;
   6978 			break;
   6979 		default:
   6980 			panic("wm_gmii_mediachange: bad media 0x%x",
   6981 			    ife->ifm_media);
   6982 		}
   6983 	}
   6984 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6985 	if (sc->sc_type <= WM_T_82543)
   6986 		wm_gmii_reset(sc);
   6987 
   6988 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   6989 		return 0;
   6990 	return rc;
   6991 }
   6992 
   6993 #define	MDI_IO		CTRL_SWDPIN(2)
   6994 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   6995 #define	MDI_CLK		CTRL_SWDPIN(3)
   6996 
   6997 static void
   6998 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   6999 {
   7000 	uint32_t i, v;
   7001 
   7002 	v = CSR_READ(sc, WMREG_CTRL);
   7003 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   7004 	v |= MDI_DIR | CTRL_SWDPIO(3);
   7005 
   7006 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   7007 		if (data & i)
   7008 			v |= MDI_IO;
   7009 		else
   7010 			v &= ~MDI_IO;
   7011 		CSR_WRITE(sc, WMREG_CTRL, v);
   7012 		CSR_WRITE_FLUSH(sc);
   7013 		delay(10);
   7014 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7015 		CSR_WRITE_FLUSH(sc);
   7016 		delay(10);
   7017 		CSR_WRITE(sc, WMREG_CTRL, v);
   7018 		CSR_WRITE_FLUSH(sc);
   7019 		delay(10);
   7020 	}
   7021 }
   7022 
   7023 static uint32_t
   7024 i82543_mii_recvbits(struct wm_softc *sc)
   7025 {
   7026 	uint32_t v, i, data = 0;
   7027 
   7028 	v = CSR_READ(sc, WMREG_CTRL);
   7029 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   7030 	v |= CTRL_SWDPIO(3);
   7031 
   7032 	CSR_WRITE(sc, WMREG_CTRL, v);
   7033 	CSR_WRITE_FLUSH(sc);
   7034 	delay(10);
   7035 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7036 	CSR_WRITE_FLUSH(sc);
   7037 	delay(10);
   7038 	CSR_WRITE(sc, WMREG_CTRL, v);
   7039 	CSR_WRITE_FLUSH(sc);
   7040 	delay(10);
   7041 
   7042 	for (i = 0; i < 16; i++) {
   7043 		data <<= 1;
   7044 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7045 		CSR_WRITE_FLUSH(sc);
   7046 		delay(10);
   7047 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   7048 			data |= 1;
   7049 		CSR_WRITE(sc, WMREG_CTRL, v);
   7050 		CSR_WRITE_FLUSH(sc);
   7051 		delay(10);
   7052 	}
   7053 
   7054 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   7055 	CSR_WRITE_FLUSH(sc);
   7056 	delay(10);
   7057 	CSR_WRITE(sc, WMREG_CTRL, v);
   7058 	CSR_WRITE_FLUSH(sc);
   7059 	delay(10);
   7060 
   7061 	return data;
   7062 }
   7063 
   7064 #undef MDI_IO
   7065 #undef MDI_DIR
   7066 #undef MDI_CLK
   7067 
   7068 /*
   7069  * wm_gmii_i82543_readreg:	[mii interface function]
   7070  *
   7071  *	Read a PHY register on the GMII (i82543 version).
   7072  */
   7073 static int
   7074 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   7075 {
   7076 	struct wm_softc *sc = device_private(self);
   7077 	int rv;
   7078 
   7079 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
   7080 	i82543_mii_sendbits(sc, reg | (phy << 5) |
   7081 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   7082 	rv = i82543_mii_recvbits(sc) & 0xffff;
   7083 
   7084 	DPRINTF(WM_DEBUG_GMII,
   7085 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   7086 	    device_xname(sc->sc_dev), phy, reg, rv));
   7087 
   7088 	return rv;
   7089 }
   7090 
   7091 /*
   7092  * wm_gmii_i82543_writereg:	[mii interface function]
   7093  *
   7094  *	Write a PHY register on the GMII (i82543 version).
   7095  */
   7096 static void
   7097 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   7098 {
   7099 	struct wm_softc *sc = device_private(self);
   7100 
   7101 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
   7102 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   7103 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   7104 	    (MII_COMMAND_START << 30), 32);
   7105 }
   7106 
   7107 /*
   7108  * wm_gmii_i82544_readreg:	[mii interface function]
   7109  *
   7110  *	Read a PHY register on the GMII.
   7111  */
   7112 static int
   7113 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   7114 {
   7115 	struct wm_softc *sc = device_private(self);
   7116 	uint32_t mdic = 0;
   7117 	int i, rv;
   7118 
   7119 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   7120 	    MDIC_REGADD(reg));
   7121 
   7122 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   7123 		mdic = CSR_READ(sc, WMREG_MDIC);
   7124 		if (mdic & MDIC_READY)
   7125 			break;
   7126 		delay(50);
   7127 	}
   7128 
   7129 	if ((mdic & MDIC_READY) == 0) {
   7130 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   7131 		    device_xname(sc->sc_dev), phy, reg);
   7132 		rv = 0;
   7133 	} else if (mdic & MDIC_E) {
   7134 #if 0 /* This is normal if no PHY is present. */
   7135 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   7136 		    device_xname(sc->sc_dev), phy, reg);
   7137 #endif
   7138 		rv = 0;
   7139 	} else {
   7140 		rv = MDIC_DATA(mdic);
   7141 		if (rv == 0xffff)
   7142 			rv = 0;
   7143 	}
   7144 
   7145 	return rv;
   7146 }
   7147 
   7148 /*
   7149  * wm_gmii_i82544_writereg:	[mii interface function]
   7150  *
   7151  *	Write a PHY register on the GMII.
   7152  */
   7153 static void
   7154 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   7155 {
   7156 	struct wm_softc *sc = device_private(self);
   7157 	uint32_t mdic = 0;
   7158 	int i;
   7159 
   7160 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   7161 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   7162 
   7163 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   7164 		mdic = CSR_READ(sc, WMREG_MDIC);
   7165 		if (mdic & MDIC_READY)
   7166 			break;
   7167 		delay(50);
   7168 	}
   7169 
   7170 	if ((mdic & MDIC_READY) == 0)
   7171 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   7172 		    device_xname(sc->sc_dev), phy, reg);
   7173 	else if (mdic & MDIC_E)
   7174 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   7175 		    device_xname(sc->sc_dev), phy, reg);
   7176 }
   7177 
   7178 /*
   7179  * wm_gmii_i80003_readreg:	[mii interface function]
   7180  *
   7181  *	Read a PHY register on the kumeran
   7182  * This could be handled by the PHY layer if we didn't have to lock the
   7183  * ressource ...
   7184  */
   7185 static int
   7186 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   7187 {
   7188 	struct wm_softc *sc = device_private(self);
   7189 	int sem;
   7190 	int rv;
   7191 
   7192 	if (phy != 1) /* only one PHY on kumeran bus */
   7193 		return 0;
   7194 
   7195 	sem = swfwphysem[sc->sc_funcid];
   7196 	if (wm_get_swfw_semaphore(sc, sem)) {
   7197 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7198 		    __func__);
   7199 		return 0;
   7200 	}
   7201 
   7202 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   7203 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   7204 		    reg >> GG82563_PAGE_SHIFT);
   7205 	} else {
   7206 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   7207 		    reg >> GG82563_PAGE_SHIFT);
   7208 	}
   7209 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
   7210 	delay(200);
   7211 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   7212 	delay(200);
   7213 
   7214 	wm_put_swfw_semaphore(sc, sem);
   7215 	return rv;
   7216 }
   7217 
   7218 /*
   7219  * wm_gmii_i80003_writereg:	[mii interface function]
   7220  *
   7221  *	Write a PHY register on the kumeran.
   7222  * This could be handled by the PHY layer if we didn't have to lock the
   7223  * ressource ...
   7224  */
   7225 static void
   7226 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   7227 {
   7228 	struct wm_softc *sc = device_private(self);
   7229 	int sem;
   7230 
   7231 	if (phy != 1) /* only one PHY on kumeran bus */
   7232 		return;
   7233 
   7234 	sem = swfwphysem[sc->sc_funcid];
   7235 	if (wm_get_swfw_semaphore(sc, sem)) {
   7236 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7237 		    __func__);
   7238 		return;
   7239 	}
   7240 
   7241 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   7242 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   7243 		    reg >> GG82563_PAGE_SHIFT);
   7244 	} else {
   7245 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   7246 		    reg >> GG82563_PAGE_SHIFT);
   7247 	}
   7248 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
   7249 	delay(200);
   7250 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   7251 	delay(200);
   7252 
   7253 	wm_put_swfw_semaphore(sc, sem);
   7254 }
   7255 
   7256 /*
   7257  * wm_gmii_bm_readreg:	[mii interface function]
   7258  *
   7259  *	Read a PHY register on the kumeran
   7260  * This could be handled by the PHY layer if we didn't have to lock the
   7261  * ressource ...
   7262  */
   7263 static int
   7264 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   7265 {
   7266 	struct wm_softc *sc = device_private(self);
   7267 	int sem;
   7268 	int rv;
   7269 
   7270 	sem = swfwphysem[sc->sc_funcid];
   7271 	if (wm_get_swfw_semaphore(sc, sem)) {
   7272 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7273 		    __func__);
   7274 		return 0;
   7275 	}
   7276 
   7277 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   7278 		if (phy == 1)
   7279 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   7280 			    reg);
   7281 		else
   7282 			wm_gmii_i82544_writereg(self, phy,
   7283 			    GG82563_PHY_PAGE_SELECT,
   7284 			    reg >> GG82563_PAGE_SHIFT);
   7285 	}
   7286 
   7287 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   7288 	wm_put_swfw_semaphore(sc, sem);
   7289 	return rv;
   7290 }
   7291 
   7292 /*
   7293  * wm_gmii_bm_writereg:	[mii interface function]
   7294  *
   7295  *	Write a PHY register on the kumeran.
   7296  * This could be handled by the PHY layer if we didn't have to lock the
   7297  * ressource ...
   7298  */
   7299 static void
   7300 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   7301 {
   7302 	struct wm_softc *sc = device_private(self);
   7303 	int sem;
   7304 
   7305 	sem = swfwphysem[sc->sc_funcid];
   7306 	if (wm_get_swfw_semaphore(sc, sem)) {
   7307 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7308 		    __func__);
   7309 		return;
   7310 	}
   7311 
   7312 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   7313 		if (phy == 1)
   7314 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   7315 			    reg);
   7316 		else
   7317 			wm_gmii_i82544_writereg(self, phy,
   7318 			    GG82563_PHY_PAGE_SELECT,
   7319 			    reg >> GG82563_PAGE_SHIFT);
   7320 	}
   7321 
   7322 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   7323 	wm_put_swfw_semaphore(sc, sem);
   7324 }
   7325 
/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Access (read if rd != 0, else write *val to) a PHY wakeup
 *	register, which lives on page 800 and must be reached through
 *	the page-769 enable register.  The enable register's previous
 *	value is restored when done.
 */
static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
{
	struct wm_softc *sc = device_private(self);
	uint16_t regnum = BM_PHY_REG_NUM(offset);
	uint16_t wuce;

	/* XXX Gig must be disabled for MDIO accesses to page 800 */
	if (sc->sc_type == WM_T_PCH) {
		/* XXX e1000 driver do nothing... why? */
	}

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Remember the enable register so we can restore it below. */
	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);

	/* Enable page-800 access without asserting host wakeup. */
	wuce &= ~BM_WUC_HOST_WU_BIT;
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
	    wuce | BM_WUC_ENABLE_BIT);

	/* Select page 800 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);

	/* Write page 800 */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);

	/* Perform the actual access through the data opcode register. */
	if (rd)
		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
	else
		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Restore the saved enable-register value. */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
}
   7366 
   7367 /*
   7368  * wm_gmii_hv_readreg:	[mii interface function]
   7369  *
 *	Read a PHY register on the HV (82577/82578) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   7373  */
   7374 static int
   7375 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   7376 {
   7377 	struct wm_softc *sc = device_private(self);
   7378 	uint16_t page = BM_PHY_REG_PAGE(reg);
   7379 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   7380 	uint16_t val;
   7381 	int rv;
   7382 
   7383 	if (wm_get_swfwhw_semaphore(sc)) {
   7384 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7385 		    __func__);
   7386 		return 0;
   7387 	}
   7388 
   7389 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   7390 	if (sc->sc_phytype == WMPHY_82577) {
   7391 		/* XXX must write */
   7392 	}
   7393 
   7394 	/* Page 800 works differently than the rest so it has its own func */
   7395 	if (page == BM_WUC_PAGE) {
   7396 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   7397 		return val;
   7398 	}
   7399 
   7400 	/*
   7401 	 * Lower than page 768 works differently than the rest so it has its
   7402 	 * own func
   7403 	 */
   7404 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   7405 		printf("gmii_hv_readreg!!!\n");
   7406 		return 0;
   7407 	}
   7408 
   7409 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   7410 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7411 		    page << BME1000_PAGE_SHIFT);
   7412 	}
   7413 
   7414 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   7415 	wm_put_swfwhw_semaphore(sc);
   7416 	return rv;
   7417 }
   7418 
   7419 /*
   7420  * wm_gmii_hv_writereg:	[mii interface function]
   7421  *
 *	Write a PHY register on the HV (82577/82578) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   7425  */
   7426 static void
   7427 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   7428 {
   7429 	struct wm_softc *sc = device_private(self);
   7430 	uint16_t page = BM_PHY_REG_PAGE(reg);
   7431 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   7432 
   7433 	if (wm_get_swfwhw_semaphore(sc)) {
   7434 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7435 		    __func__);
   7436 		return;
   7437 	}
   7438 
   7439 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   7440 
   7441 	/* Page 800 works differently than the rest so it has its own func */
   7442 	if (page == BM_WUC_PAGE) {
   7443 		uint16_t tmp;
   7444 
   7445 		tmp = val;
   7446 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   7447 		return;
   7448 	}
   7449 
   7450 	/*
   7451 	 * Lower than page 768 works differently than the rest so it has its
   7452 	 * own func
   7453 	 */
   7454 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   7455 		printf("gmii_hv_writereg!!!\n");
   7456 		return;
   7457 	}
   7458 
   7459 	/*
   7460 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   7461 	 * Power Down (whenever bit 11 of the PHY control register is set)
   7462 	 */
   7463 
   7464 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   7465 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7466 		    page << BME1000_PAGE_SHIFT);
   7467 	}
   7468 
   7469 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   7470 	wm_put_swfwhw_semaphore(sc);
   7471 }
   7472 
   7473 /*
   7474  * wm_sgmii_uses_mdio
   7475  *
   7476  * Check whether the transaction is to the internal PHY or the external
   7477  * MDIO interface. Return true if it's MDIO.
   7478  */
   7479 static bool
   7480 wm_sgmii_uses_mdio(struct wm_softc *sc)
   7481 {
   7482 	uint32_t reg;
   7483 	bool ismdio = false;
   7484 
   7485 	switch (sc->sc_type) {
   7486 	case WM_T_82575:
   7487 	case WM_T_82576:
   7488 		reg = CSR_READ(sc, WMREG_MDIC);
   7489 		ismdio = ((reg & MDIC_DEST) != 0);
   7490 		break;
   7491 	case WM_T_82580:
   7492 	case WM_T_82580ER:
   7493 	case WM_T_I350:
   7494 	case WM_T_I354:
   7495 	case WM_T_I210:
   7496 	case WM_T_I211:
   7497 		reg = CSR_READ(sc, WMREG_MDICNFG);
   7498 		ismdio = ((reg & MDICNFG_DEST) != 0);
   7499 		break;
   7500 	default:
   7501 		break;
   7502 	}
   7503 
   7504 	return ismdio;
   7505 }
   7506 
   7507 /*
   7508  * wm_sgmii_readreg:	[mii interface function]
   7509  *
   7510  *	Read a PHY register on the SGMII
   7511  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   7513  */
   7514 static int
   7515 wm_sgmii_readreg(device_t self, int phy, int reg)
   7516 {
   7517 	struct wm_softc *sc = device_private(self);
   7518 	uint32_t i2ccmd;
   7519 	int i, rv;
   7520 
   7521 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7522 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7523 		    __func__);
   7524 		return 0;
   7525 	}
   7526 
   7527 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7528 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7529 	    | I2CCMD_OPCODE_READ;
   7530 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7531 
   7532 	/* Poll the ready bit */
   7533 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7534 		delay(50);
   7535 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7536 		if (i2ccmd & I2CCMD_READY)
   7537 			break;
   7538 	}
   7539 	if ((i2ccmd & I2CCMD_READY) == 0)
   7540 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   7541 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7542 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7543 
   7544 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   7545 
   7546 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7547 	return rv;
   7548 }
   7549 
   7550 /*
   7551  * wm_sgmii_writereg:	[mii interface function]
   7552  *
   7553  *	Write a PHY register on the SGMII.
   7554  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   7556  */
   7557 static void
   7558 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   7559 {
   7560 	struct wm_softc *sc = device_private(self);
   7561 	uint32_t i2ccmd;
   7562 	int i;
   7563 
   7564 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7565 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7566 		    __func__);
   7567 		return;
   7568 	}
   7569 
   7570 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7571 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7572 	    | I2CCMD_OPCODE_WRITE;
   7573 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7574 
   7575 	/* Poll the ready bit */
   7576 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7577 		delay(50);
   7578 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7579 		if (i2ccmd & I2CCMD_READY)
   7580 			break;
   7581 	}
   7582 	if ((i2ccmd & I2CCMD_READY) == 0)
   7583 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   7584 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7585 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7586 
   7587 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
   7588 }
   7589 
   7590 /*
   7591  * wm_gmii_82580_readreg:	[mii interface function]
   7592  *
   7593  *	Read a PHY register on the 82580 and I350.
   7594  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   7596  */
   7597 static int
   7598 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   7599 {
   7600 	struct wm_softc *sc = device_private(self);
   7601 	int sem;
   7602 	int rv;
   7603 
   7604 	sem = swfwphysem[sc->sc_funcid];
   7605 	if (wm_get_swfw_semaphore(sc, sem)) {
   7606 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7607 		    __func__);
   7608 		return 0;
   7609 	}
   7610 
   7611 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   7612 
   7613 	wm_put_swfw_semaphore(sc, sem);
   7614 	return rv;
   7615 }
   7616 
   7617 /*
   7618  * wm_gmii_82580_writereg:	[mii interface function]
   7619  *
   7620  *	Write a PHY register on the 82580 and I350.
   7621  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   7623  */
   7624 static void
   7625 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   7626 {
   7627 	struct wm_softc *sc = device_private(self);
   7628 	int sem;
   7629 
   7630 	sem = swfwphysem[sc->sc_funcid];
   7631 	if (wm_get_swfw_semaphore(sc, sem)) {
   7632 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7633 		    __func__);
   7634 		return;
   7635 	}
   7636 
   7637 	wm_gmii_i82544_writereg(self, phy, reg, val);
   7638 
   7639 	wm_put_swfw_semaphore(sc, sem);
   7640 }
   7641 
   7642 /*
   7643  * wm_gmii_statchg:	[mii interface function]
   7644  *
   7645  *	Callback from MII layer when media changes.
   7646  */
   7647 static void
   7648 wm_gmii_statchg(struct ifnet *ifp)
   7649 {
   7650 	struct wm_softc *sc = ifp->if_softc;
   7651 	struct mii_data *mii = &sc->sc_mii;
   7652 
   7653 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   7654 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7655 	sc->sc_fcrtl &= ~FCRTL_XONE;
   7656 
   7657 	/*
   7658 	 * Get flow control negotiation result.
   7659 	 */
   7660 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   7661 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   7662 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   7663 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   7664 	}
   7665 
   7666 	if (sc->sc_flowflags & IFM_FLOW) {
   7667 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   7668 			sc->sc_ctrl |= CTRL_TFCE;
   7669 			sc->sc_fcrtl |= FCRTL_XONE;
   7670 		}
   7671 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   7672 			sc->sc_ctrl |= CTRL_RFCE;
   7673 	}
   7674 
   7675 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7676 		DPRINTF(WM_DEBUG_LINK,
   7677 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   7678 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7679 	} else {
   7680 		DPRINTF(WM_DEBUG_LINK,
   7681 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   7682 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7683 	}
   7684 
   7685 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7686 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7687 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   7688 						 : WMREG_FCRTL, sc->sc_fcrtl);
   7689 	if (sc->sc_type == WM_T_80003) {
   7690 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   7691 		case IFM_1000_T:
   7692 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   7693 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   7694 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   7695 			break;
   7696 		default:
   7697 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   7698 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   7699 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   7700 			break;
   7701 		}
   7702 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7703 	}
   7704 }
   7705 
   7706 /*
   7707  * wm_kmrn_readreg:
   7708  *
   7709  *	Read a kumeran register
   7710  */
   7711 static int
   7712 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   7713 {
   7714 	int rv;
   7715 
   7716 	if (sc->sc_flags == WM_F_LOCK_SWFW) {
   7717 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   7718 			aprint_error_dev(sc->sc_dev,
   7719 			    "%s: failed to get semaphore\n", __func__);
   7720 			return 0;
   7721 		}
   7722 	} else if (sc->sc_flags == WM_F_LOCK_EXTCNF) {
   7723 		if (wm_get_swfwhw_semaphore(sc)) {
   7724 			aprint_error_dev(sc->sc_dev,
   7725 			    "%s: failed to get semaphore\n", __func__);
   7726 			return 0;
   7727 		}
   7728 	}
   7729 
   7730 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7731 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7732 	    KUMCTRLSTA_REN);
   7733 	CSR_WRITE_FLUSH(sc);
   7734 	delay(2);
   7735 
   7736 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   7737 
   7738 	if (sc->sc_flags == WM_F_LOCK_SWFW)
   7739 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   7740 	else if (sc->sc_flags == WM_F_LOCK_EXTCNF)
   7741 		wm_put_swfwhw_semaphore(sc);
   7742 
   7743 	return rv;
   7744 }
   7745 
   7746 /*
   7747  * wm_kmrn_writereg:
   7748  *
   7749  *	Write a kumeran register
   7750  */
   7751 static void
   7752 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   7753 {
   7754 
   7755 	if (sc->sc_flags == WM_F_LOCK_SWFW) {
   7756 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   7757 			aprint_error_dev(sc->sc_dev,
   7758 			    "%s: failed to get semaphore\n", __func__);
   7759 			return;
   7760 		}
   7761 	} else if (sc->sc_flags == WM_F_LOCK_EXTCNF) {
   7762 		if (wm_get_swfwhw_semaphore(sc)) {
   7763 			aprint_error_dev(sc->sc_dev,
   7764 			    "%s: failed to get semaphore\n", __func__);
   7765 			return;
   7766 		}
   7767 	}
   7768 
   7769 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7770 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7771 	    (val & KUMCTRLSTA_MASK));
   7772 
   7773 	if (sc->sc_flags == WM_F_LOCK_SWFW)
   7774 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   7775 	else if (sc->sc_flags == WM_F_LOCK_EXTCNF)
   7776 		wm_put_swfwhw_semaphore(sc);
   7777 }
   7778 
   7779 static int
   7780 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
   7781 {
   7782 	uint32_t eecd = 0;
   7783 
   7784 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   7785 	    || sc->sc_type == WM_T_82583) {
   7786 		eecd = CSR_READ(sc, WMREG_EECD);
   7787 
   7788 		/* Isolate bits 15 & 16 */
   7789 		eecd = ((eecd >> 15) & 0x03);
   7790 
   7791 		/* If both bits are set, device is Flash type */
   7792 		if (eecd == 0x03)
   7793 			return 0;
   7794 	}
   7795 	return 1;
   7796 }
   7797 
   7798 static int
   7799 wm_get_swsm_semaphore(struct wm_softc *sc)
   7800 {
   7801 	int32_t timeout;
   7802 	uint32_t swsm;
   7803 
   7804 	/* Get the SW semaphore. */
   7805 	timeout = 1000 + 1; /* XXX */
   7806 	while (timeout) {
   7807 		swsm = CSR_READ(sc, WMREG_SWSM);
   7808 
   7809 		if ((swsm & SWSM_SMBI) == 0)
   7810 			break;
   7811 
   7812 		delay(50);
   7813 		timeout--;
   7814 	}
   7815 
   7816 	if (timeout == 0) {
   7817 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   7818 		return 1;
   7819 	}
   7820 
   7821 	/* Get the FW semaphore. */
   7822 	timeout = 1000 + 1; /* XXX */
   7823 	while (timeout) {
   7824 		swsm = CSR_READ(sc, WMREG_SWSM);
   7825 		swsm |= SWSM_SWESMBI;
   7826 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   7827 		/* if we managed to set the bit we got the semaphore. */
   7828 		swsm = CSR_READ(sc, WMREG_SWSM);
   7829 		if (swsm & SWSM_SWESMBI)
   7830 			break;
   7831 
   7832 		delay(50);
   7833 		timeout--;
   7834 	}
   7835 
   7836 	if (timeout == 0) {
   7837 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   7838 		/* Release semaphores */
   7839 		wm_put_swsm_semaphore(sc);
   7840 		return 1;
   7841 	}
   7842 	return 0;
   7843 }
   7844 
   7845 static void
   7846 wm_put_swsm_semaphore(struct wm_softc *sc)
   7847 {
   7848 	uint32_t swsm;
   7849 
   7850 	swsm = CSR_READ(sc, WMREG_SWSM);
   7851 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   7852 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   7853 }
   7854 
   7855 static int
   7856 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   7857 {
   7858 	uint32_t swfw_sync;
   7859 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   7860 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   7861 	int timeout = 200;
   7862 
   7863 	for (timeout = 0; timeout < 200; timeout++) {
   7864 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   7865 			if (wm_get_swsm_semaphore(sc)) {
   7866 				aprint_error_dev(sc->sc_dev,
   7867 				    "%s: failed to get semaphore\n",
   7868 				    __func__);
   7869 				return 1;
   7870 			}
   7871 		}
   7872 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   7873 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   7874 			swfw_sync |= swmask;
   7875 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   7876 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   7877 				wm_put_swsm_semaphore(sc);
   7878 			return 0;
   7879 		}
   7880 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   7881 			wm_put_swsm_semaphore(sc);
   7882 		delay(5000);
   7883 	}
   7884 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   7885 	    device_xname(sc->sc_dev), mask, swfw_sync);
   7886 	return 1;
   7887 }
   7888 
   7889 static void
   7890 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   7891 {
   7892 	uint32_t swfw_sync;
   7893 
   7894 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   7895 		while (wm_get_swsm_semaphore(sc) != 0)
   7896 			continue;
   7897 	}
   7898 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   7899 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   7900 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   7901 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   7902 		wm_put_swsm_semaphore(sc);
   7903 }
   7904 
   7905 static int
   7906 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   7907 {
   7908 	uint32_t ext_ctrl;
   7909 	int timeout = 200;
   7910 
   7911 	for (timeout = 0; timeout < 200; timeout++) {
   7912 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   7913 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
   7914 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   7915 
   7916 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   7917 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
   7918 			return 0;
   7919 		delay(5000);
   7920 	}
   7921 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   7922 	    device_xname(sc->sc_dev), ext_ctrl);
   7923 	return 1;
   7924 }
   7925 
   7926 static void
   7927 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   7928 {
   7929 	uint32_t ext_ctrl;
   7930 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   7931 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
   7932 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   7933 }
   7934 
   7935 static int
   7936 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   7937 {
   7938 	int i = 0;
   7939 	uint32_t reg;
   7940 
   7941 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   7942 	do {
   7943 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   7944 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   7945 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   7946 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   7947 			break;
   7948 		delay(2*1000);
   7949 		i++;
   7950 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   7951 
   7952 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   7953 		wm_put_hw_semaphore_82573(sc);
   7954 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   7955 		    device_xname(sc->sc_dev));
   7956 		return -1;
   7957 	}
   7958 
   7959 	return 0;
   7960 }
   7961 
   7962 static void
   7963 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   7964 {
   7965 	uint32_t reg;
   7966 
   7967 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   7968 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   7969 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   7970 }
   7971 
   7972 static int
   7973 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   7974 {
   7975 	uint32_t eecd;
   7976 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   7977 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   7978 	uint8_t sig_byte = 0;
   7979 
   7980 	switch (sc->sc_type) {
   7981 	case WM_T_ICH8:
   7982 	case WM_T_ICH9:
   7983 		eecd = CSR_READ(sc, WMREG_EECD);
   7984 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   7985 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   7986 			return 0;
   7987 		}
   7988 		/* FALLTHROUGH */
   7989 	default:
   7990 		/* Default to 0 */
   7991 		*bank = 0;
   7992 
   7993 		/* Check bank 0 */
   7994 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   7995 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   7996 			*bank = 0;
   7997 			return 0;
   7998 		}
   7999 
   8000 		/* Check bank 1 */
   8001 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   8002 		    &sig_byte);
   8003 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   8004 			*bank = 1;
   8005 			return 0;
   8006 		}
   8007 	}
   8008 
   8009 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   8010 		device_xname(sc->sc_dev)));
   8011 	return -1;
   8012 }
   8013 
   8014 /******************************************************************************
   8015  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   8016  * register.
   8017  *
   8018  * sc - Struct containing variables accessed by shared code
   8019  * offset - offset of word in the EEPROM to read
   8020  * data - word read from the EEPROM
   8021  * words - number of words to read
   8022  *****************************************************************************/
   8023 static int
   8024 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   8025 {
   8026 	int32_t  error = 0;
   8027 	uint32_t flash_bank = 0;
   8028 	uint32_t act_offset = 0;
   8029 	uint32_t bank_offset = 0;
   8030 	uint16_t word = 0;
   8031 	uint16_t i = 0;
   8032 
   8033 	/* We need to know which is the valid flash bank.  In the event
   8034 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   8035 	 * managing flash_bank.  So it cannot be trusted and needs
   8036 	 * to be updated with each read.
   8037 	 */
   8038 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
   8039 	if (error) {
   8040 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
   8041 		    __func__);
   8042 		flash_bank = 0;
   8043 	}
   8044 
   8045 	/*
   8046 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   8047 	 * size
   8048 	 */
   8049 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   8050 
   8051 	error = wm_get_swfwhw_semaphore(sc);
   8052 	if (error) {
   8053 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8054 		    __func__);
   8055 		return error;
   8056 	}
   8057 
   8058 	for (i = 0; i < words; i++) {
   8059 		/* The NVM part needs a byte offset, hence * 2 */
   8060 		act_offset = bank_offset + ((offset + i) * 2);
   8061 		error = wm_read_ich8_word(sc, act_offset, &word);
   8062 		if (error) {
   8063 			aprint_error_dev(sc->sc_dev,
   8064 			    "%s: failed to read NVM\n", __func__);
   8065 			break;
   8066 		}
   8067 		data[i] = word;
   8068 	}
   8069 
   8070 	wm_put_swfwhw_semaphore(sc);
   8071 	return error;
   8072 }
   8073 
   8074 /******************************************************************************
   8075  * This function does initial flash setup so that a new read/write/erase cycle
   8076  * can be started.
   8077  *
   8078  * sc - The pointer to the hw structure
   8079  ****************************************************************************/
   8080 static int32_t
   8081 wm_ich8_cycle_init(struct wm_softc *sc)
   8082 {
   8083 	uint16_t hsfsts;
   8084 	int32_t error = 1;
   8085 	int32_t i     = 0;
   8086 
   8087 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8088 
   8089 	/* May be check the Flash Des Valid bit in Hw status */
   8090 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   8091 		return error;
   8092 	}
   8093 
   8094 	/* Clear FCERR in Hw status by writing 1 */
   8095 	/* Clear DAEL in Hw status by writing a 1 */
   8096 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   8097 
   8098 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8099 
   8100 	/*
   8101 	 * Either we should have a hardware SPI cycle in progress bit to check
   8102 	 * against, in order to start a new cycle or FDONE bit should be
   8103 	 * changed in the hardware so that it is 1 after harware reset, which
   8104 	 * can then be used as an indication whether a cycle is in progress or
   8105 	 * has been completed .. we should also have some software semaphore
   8106 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   8107 	 * threads access to those bits can be sequentiallized or a way so that
   8108 	 * 2 threads dont start the cycle at the same time
   8109 	 */
   8110 
   8111 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   8112 		/*
   8113 		 * There is no cycle running at present, so we can start a
   8114 		 * cycle
   8115 		 */
   8116 
   8117 		/* Begin by setting Flash Cycle Done. */
   8118 		hsfsts |= HSFSTS_DONE;
   8119 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8120 		error = 0;
   8121 	} else {
   8122 		/*
   8123 		 * otherwise poll for sometime so the current cycle has a
   8124 		 * chance to end before giving up.
   8125 		 */
   8126 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   8127 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8128 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   8129 				error = 0;
   8130 				break;
   8131 			}
   8132 			delay(1);
   8133 		}
   8134 		if (error == 0) {
   8135 			/*
   8136 			 * Successful in waiting for previous cycle to timeout,
   8137 			 * now set the Flash Cycle Done.
   8138 			 */
   8139 			hsfsts |= HSFSTS_DONE;
   8140 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8141 		}
   8142 	}
   8143 	return error;
   8144 }
   8145 
   8146 /******************************************************************************
   8147  * This function starts a flash cycle and waits for its completion
   8148  *
   8149  * sc - The pointer to the hw structure
   8150  ****************************************************************************/
   8151 static int32_t
   8152 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   8153 {
   8154 	uint16_t hsflctl;
   8155 	uint16_t hsfsts;
   8156 	int32_t error = 1;
   8157 	uint32_t i = 0;
   8158 
   8159 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   8160 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   8161 	hsflctl |= HSFCTL_GO;
   8162 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   8163 
   8164 	/* wait till FDONE bit is set to 1 */
   8165 	do {
   8166 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8167 		if (hsfsts & HSFSTS_DONE)
   8168 			break;
   8169 		delay(1);
   8170 		i++;
   8171 	} while (i < timeout);
   8172 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   8173 		error = 0;
   8174 
   8175 	return error;
   8176 }
   8177 
   8178 /******************************************************************************
   8179  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   8180  *
   8181  * sc - The pointer to the hw structure
   8182  * index - The index of the byte or word to read.
   8183  * size - Size of data to read, 1=byte 2=word
   8184  * data - Pointer to the word to store the value read.
   8185  *****************************************************************************/
   8186 static int32_t
   8187 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   8188     uint32_t size, uint16_t* data)
   8189 {
   8190 	uint16_t hsfsts;
   8191 	uint16_t hsflctl;
   8192 	uint32_t flash_linear_address;
   8193 	uint32_t flash_data = 0;
   8194 	int32_t error = 1;
   8195 	int32_t count = 0;
   8196 
   8197 	if (size < 1  || size > 2 || data == 0x0 ||
   8198 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   8199 		return error;
   8200 
   8201 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   8202 	    sc->sc_ich8_flash_base;
   8203 
   8204 	do {
   8205 		delay(1);
   8206 		/* Steps */
   8207 		error = wm_ich8_cycle_init(sc);
   8208 		if (error)
   8209 			break;
   8210 
   8211 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   8212 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   8213 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   8214 		    & HSFCTL_BCOUNT_MASK;
   8215 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   8216 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   8217 
   8218 		/*
   8219 		 * Write the last 24 bits of index into Flash Linear address
   8220 		 * field in Flash Address
   8221 		 */
   8222 		/* TODO: TBD maybe check the index against the size of flash */
   8223 
   8224 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   8225 
   8226 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   8227 
   8228 		/*
   8229 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   8230 		 * the whole sequence a few more times, else read in (shift in)
   8231 		 * the Flash Data0, the order is least significant byte first
   8232 		 * msb to lsb
   8233 		 */
   8234 		if (error == 0) {
   8235 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   8236 			if (size == 1)
   8237 				*data = (uint8_t)(flash_data & 0x000000FF);
   8238 			else if (size == 2)
   8239 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   8240 			break;
   8241 		} else {
   8242 			/*
   8243 			 * If we've gotten here, then things are probably
   8244 			 * completely hosed, but if the error condition is
   8245 			 * detected, it won't hurt to give it another try...
   8246 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   8247 			 */
   8248 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8249 			if (hsfsts & HSFSTS_ERR) {
   8250 				/* Repeat for some time before giving up. */
   8251 				continue;
   8252 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   8253 				break;
   8254 		}
   8255 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   8256 
   8257 	return error;
   8258 }
   8259 
   8260 /******************************************************************************
   8261  * Reads a single byte from the NVM using the ICH8 flash access registers.
   8262  *
   8263  * sc - pointer to wm_hw structure
   8264  * index - The index of the byte to read.
   8265  * data - Pointer to a byte to store the value read.
   8266  *****************************************************************************/
   8267 static int32_t
   8268 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   8269 {
   8270 	int32_t status;
   8271 	uint16_t word = 0;
   8272 
   8273 	status = wm_read_ich8_data(sc, index, 1, &word);
   8274 	if (status == 0)
   8275 		*data = (uint8_t)word;
   8276 	else
   8277 		*data = 0;
   8278 
   8279 	return status;
   8280 }
   8281 
   8282 /******************************************************************************
   8283  * Reads a word from the NVM using the ICH8 flash access registers.
   8284  *
   8285  * sc - pointer to wm_hw structure
   8286  * index - The starting byte index of the word to read.
   8287  * data - Pointer to a word to store the value read.
   8288  *****************************************************************************/
   8289 static int32_t
   8290 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   8291 {
   8292 	int32_t status;
   8293 
   8294 	status = wm_read_ich8_data(sc, index, 2, data);
   8295 	return status;
   8296 }
   8297 
   8298 static int
   8299 wm_check_mng_mode(struct wm_softc *sc)
   8300 {
   8301 	int rv;
   8302 
   8303 	switch (sc->sc_type) {
   8304 	case WM_T_ICH8:
   8305 	case WM_T_ICH9:
   8306 	case WM_T_ICH10:
   8307 	case WM_T_PCH:
   8308 	case WM_T_PCH2:
   8309 	case WM_T_PCH_LPT:
   8310 		rv = wm_check_mng_mode_ich8lan(sc);
   8311 		break;
   8312 	case WM_T_82574:
   8313 	case WM_T_82583:
   8314 		rv = wm_check_mng_mode_82574(sc);
   8315 		break;
   8316 	case WM_T_82571:
   8317 	case WM_T_82572:
   8318 	case WM_T_82573:
   8319 	case WM_T_80003:
   8320 		rv = wm_check_mng_mode_generic(sc);
   8321 		break;
   8322 	default:
   8323 		/* noting to do */
   8324 		rv = 0;
   8325 		break;
   8326 	}
   8327 
   8328 	return rv;
   8329 }
   8330 
   8331 static int
   8332 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   8333 {
   8334 	uint32_t fwsm;
   8335 
   8336 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8337 
   8338 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   8339 		return 1;
   8340 
   8341 	return 0;
   8342 }
   8343 
   8344 static int
   8345 wm_check_mng_mode_82574(struct wm_softc *sc)
   8346 {
   8347 	uint16_t data;
   8348 
   8349 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
   8350 
   8351 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
   8352 		return 1;
   8353 
   8354 	return 0;
   8355 }
   8356 
   8357 static int
   8358 wm_check_mng_mode_generic(struct wm_softc *sc)
   8359 {
   8360 	uint32_t fwsm;
   8361 
   8362 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8363 
   8364 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   8365 		return 1;
   8366 
   8367 	return 0;
   8368 }
   8369 
/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management packets should be passed through to the
 *	host (i.e. ASF/management firmware is present, receiving TCO
 *	packets, and configured in a pass-through capable mode), else 0.
 */
static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	/* Without ASF firmware there is nothing to pass through. */
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
		device_xname(sc->sc_dev), manc));
	/* The firmware must be receiving TCO packets for this to matter. */
	if ((manc & MANC_RECV_TCO_EN) == 0)
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		/* Manageability clock must not be gated and iAMT selected. */
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((fwsm & FWSM_MODE_MASK)
			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
		uint16_t data;

		/* 82574/82583 keep the management mode in EEPROM CFG2. */
		factps = CSR_READ(sc, WMREG_FACTPS);
		wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
			device_xname(sc->sc_dev), factps, data));
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((data & EEPROM_CFG2_MNGM_MASK)
			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		/* SMBus enabled without ASF also means pass-through. */
		return 1;

	return 0;
}
   8409 
   8410 static int
   8411 wm_check_reset_block(struct wm_softc *sc)
   8412 {
   8413 	uint32_t reg;
   8414 
   8415 	switch (sc->sc_type) {
   8416 	case WM_T_ICH8:
   8417 	case WM_T_ICH9:
   8418 	case WM_T_ICH10:
   8419 	case WM_T_PCH:
   8420 	case WM_T_PCH2:
   8421 	case WM_T_PCH_LPT:
   8422 		reg = CSR_READ(sc, WMREG_FWSM);
   8423 		if ((reg & FWSM_RSPCIPHY) != 0)
   8424 			return 0;
   8425 		else
   8426 			return -1;
   8427 		break;
   8428 	case WM_T_82571:
   8429 	case WM_T_82572:
   8430 	case WM_T_82573:
   8431 	case WM_T_82574:
   8432 	case WM_T_82583:
   8433 	case WM_T_80003:
   8434 		reg = CSR_READ(sc, WMREG_MANC);
   8435 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   8436 			return -1;
   8437 		else
   8438 			return 0;
   8439 		break;
   8440 	default:
   8441 		/* no problem */
   8442 		break;
   8443 	}
   8444 
   8445 	return 0;
   8446 }
   8447 
   8448 static void
   8449 wm_get_hw_control(struct wm_softc *sc)
   8450 {
   8451 	uint32_t reg;
   8452 
   8453 	switch (sc->sc_type) {
   8454 	case WM_T_82573:
   8455 		reg = CSR_READ(sc, WMREG_SWSM);
   8456 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   8457 		break;
   8458 	case WM_T_82571:
   8459 	case WM_T_82572:
   8460 	case WM_T_82574:
   8461 	case WM_T_82583:
   8462 	case WM_T_80003:
   8463 	case WM_T_ICH8:
   8464 	case WM_T_ICH9:
   8465 	case WM_T_ICH10:
   8466 	case WM_T_PCH:
   8467 	case WM_T_PCH2:
   8468 	case WM_T_PCH_LPT:
   8469 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8470 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   8471 		break;
   8472 	default:
   8473 		break;
   8474 	}
   8475 }
   8476 
   8477 static void
   8478 wm_release_hw_control(struct wm_softc *sc)
   8479 {
   8480 	uint32_t reg;
   8481 
   8482 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   8483 		return;
   8484 
   8485 	if (sc->sc_type == WM_T_82573) {
   8486 		reg = CSR_READ(sc, WMREG_SWSM);
   8487 		reg &= ~SWSM_DRV_LOAD;
   8488 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   8489 	} else {
   8490 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8491 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   8492 	}
   8493 }
   8494 
/* XXX Currently TBI only */
/*
 * wm_check_for_link:
 *
 *	TBI (fiber) link-state check.  Inspect the signal-detect pin
 *	(CTRL_SWDPIN(1)), link-up status (STATUS_LU) and /C/ ordered-set
 *	reception (RXCW_C), then either force the link up, fall back to
 *	autonegotiation, or leave the state alone per the table below.
 *	Always returns 0.
 */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	/* SERDES media never needs this dance; report link up. */
	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
		sc->sc_tbi_linkup = 1;
		return 0;
	}

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	/* Signal-detect polarity differs on older chips (<= 82544). */
	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0),
		((rxcw & RXCW_C) != 0)
		    ));

	/*
	 * SWDPIN   LU RXCW
	 *      0    0    0
	 *      0    0    1	(should not happen)
	 *      0    1    0	(should not happen)
	 *      0    1    1	(should not happen)
	 *      1    0    0	Disable autonego and force linkup
	 *      1    0    1	got /C/ but not linkup yet
	 *      1    1    0	(linkup)
	 *      1    1    1	If IFM_AUTO, back to autonego
	 *
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		/* Signal present but no link and no /C/: force the link. */
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
			__func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: CTRL was updated TFCE and RFCE automatically,
		 * so we should update sc->sc_ctrl
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		/* Link and /C/ while in auto media: resume autonego. */
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
			__func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		/* /C/ received but link not up yet: just note it. */
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
			status));
	}

	return 0;
}
   8570 
   8571 /* Work-around for 82566 Kumeran PCS lock loss */
   8572 static void
   8573 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   8574 {
   8575 	int miistatus, active, i;
   8576 	int reg;
   8577 
   8578 	miistatus = sc->sc_mii.mii_media_status;
   8579 
   8580 	/* If the link is not up, do nothing */
   8581 	if ((miistatus & IFM_ACTIVE) != 0)
   8582 		return;
   8583 
   8584 	active = sc->sc_mii.mii_media_active;
   8585 
   8586 	/* Nothing to do if the link is other than 1Gbps */
   8587 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   8588 		return;
   8589 
   8590 	for (i = 0; i < 10; i++) {
   8591 		/* read twice */
   8592 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   8593 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   8594 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0)
   8595 			goto out;	/* GOOD! */
   8596 
   8597 		/* Reset the PHY */
   8598 		wm_gmii_reset(sc);
   8599 		delay(5*1000);
   8600 	}
   8601 
   8602 	/* Disable GigE link negotiation */
   8603 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   8604 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   8605 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   8606 
   8607 	/*
   8608 	 * Call gig speed drop workaround on Gig disable before accessing
   8609 	 * any PHY registers.
   8610 	 */
   8611 	wm_gig_downshift_workaround_ich8lan(sc);
   8612 
   8613 out:
   8614 	return;
   8615 }
   8616 
   8617 /* WOL from S5 stops working */
   8618 static void
   8619 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   8620 {
   8621 	uint16_t kmrn_reg;
   8622 
   8623 	/* Only for igp3 */
   8624 	if (sc->sc_phytype == WMPHY_IGP_3) {
   8625 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   8626 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   8627 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   8628 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   8629 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   8630 	}
   8631 }
   8632 
   8633 #ifdef WM_WOL
/* Power down workaround on D3 */
/*
 * wm_igp3_phy_powerdown_workaround_ich8lan:
 *
 *	Put the IGP3 PHY voltage regulator into shutdown mode before
 *	entering D3.  Try the sequence twice, issuing a PHY reset
 *	between attempts if the first write does not stick.
 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		/* Success, or second attempt exhausted: stop trying. */
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
   8670 #endif /* WM_WOL */
   8671 
/*
 * Workaround for pch's PHYs
 * XXX should be moved to new PHY driver?
 */
/*
 * wm_hv_phy_workaround_ich8lan:
 *
 *	Apply the PCH (82577/82578 "HV") PHY workarounds after reset:
 *	slow MDIO mode, early-revision 82578 register pokes, page-0
 *	selection and the K1 gig workaround.  Several steps are still
 *	placeholders (see the XXX comments).
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			/*
			 * NOTE(review): the register numbers and values
			 * below are undocumented magic from the vendor
			 * workaround sequence — do not "clean up".
			 */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}
   8712 
/*
 * wm_lv_phy_workaround_ich8lan:
 *
 *	PCH2 (82579 "LV") PHY workaround: only slow MDIO mode is needed.
 */
static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}
   8719 
   8720 static void
   8721 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   8722 {
   8723 	int k1_enable = sc->sc_nvm_k1_enabled;
   8724 
   8725 	/* XXX acquire semaphore */
   8726 
   8727 	if (link) {
   8728 		k1_enable = 0;
   8729 
   8730 		/* Link stall fix for link up */
   8731 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   8732 	} else {
   8733 		/* Link stall fix for link down */
   8734 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   8735 	}
   8736 
   8737 	wm_configure_k1_ich8lan(sc, k1_enable);
   8738 
   8739 	/* XXX release semaphore */
   8740 }
   8741 
   8742 static void
   8743 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   8744 {
   8745 	uint32_t reg;
   8746 
   8747 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   8748 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   8749 	    reg | HV_KMRN_MDIO_SLOW);
   8750 }
   8751 
   8752 static void
   8753 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   8754 {
   8755 	uint32_t ctrl, ctrl_ext, tmp;
   8756 	uint16_t kmrn_reg;
   8757 
   8758 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   8759 
   8760 	if (k1_enable)
   8761 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   8762 	else
   8763 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   8764 
   8765 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   8766 
   8767 	delay(20);
   8768 
   8769 	ctrl = CSR_READ(sc, WMREG_CTRL);
   8770 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8771 
   8772 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   8773 	tmp |= CTRL_FRCSPD;
   8774 
   8775 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   8776 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   8777 	CSR_WRITE_FLUSH(sc);
   8778 	delay(20);
   8779 
   8780 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   8781 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8782 	CSR_WRITE_FLUSH(sc);
   8783 	delay(20);
   8784 }
   8785 
/*
 * wm_smbustopci:
 *
 *	Toggle the LANPHYPC override/value bits in CTRL so the PHY
 *	switches from SMBus to PCIe mode (ICH/PCH family).  Only done
 *	when the firmware is not valid and PHY reset is not blocked.
 *	The delays between writes are part of the required sequence.
 */
static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && ((wm_check_reset_block(sc) == 0))) {
		/* Assert the override with the value bit cleared ... */
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		/* ... then release the override and let things settle. */
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(50*1000);

		/*
		 * Gate automatic PHY configuration by hardware on non-managed
		 * 82579
		 */
		if (sc->sc_type == WM_T_PCH2)
			wm_gate_hw_phy_config_ich8lan(sc, 1);
	}
}
   8812 
   8813 static void
   8814 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   8815 {
   8816 	uint32_t gcr;
   8817 	pcireg_t ctrl2;
   8818 
   8819 	gcr = CSR_READ(sc, WMREG_GCR);
   8820 
   8821 	/* Only take action if timeout value is defaulted to 0 */
   8822 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   8823 		goto out;
   8824 
   8825 	if ((gcr & GCR_CAP_VER2) == 0) {
   8826 		gcr |= GCR_CMPL_TMOUT_10MS;
   8827 		goto out;
   8828 	}
   8829 
   8830 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   8831 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   8832 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   8833 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   8834 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   8835 
   8836 out:
   8837 	/* Disable completion timeout resend */
   8838 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   8839 
   8840 	CSR_WRITE(sc, WMREG_GCR, gcr);
   8841 }
   8842 
   8843 /* special case - for 82575 - need to do manual init ... */
   8844 static void
   8845 wm_reset_init_script_82575(struct wm_softc *sc)
   8846 {
   8847 	/*
   8848 	 * remark: this is untested code - we have no board without EEPROM
   8849 	 *  same setup as mentioned int the freeBSD driver for the i82575
   8850 	 */
   8851 
   8852 	/* SerDes configuration via SERDESCTRL */
   8853 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   8854 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   8855 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   8856 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   8857 
   8858 	/* CCM configuration via CCMCTL register */
   8859 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   8860 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   8861 
   8862 	/* PCIe lanes configuration */
   8863 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   8864 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   8865 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   8866 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   8867 
   8868 	/* PCIe PLL Configuration */
   8869 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   8870 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   8871 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   8872 }
   8873 
   8874 static void
   8875 wm_init_manageability(struct wm_softc *sc)
   8876 {
   8877 
   8878 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8879 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   8880 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8881 
   8882 		/* disabl hardware interception of ARP */
   8883 		manc &= ~MANC_ARP_EN;
   8884 
   8885 		/* enable receiving management packets to the host */
   8886 		if (sc->sc_type >= WM_T_82571) {
   8887 			manc |= MANC_EN_MNG2HOST;
   8888 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
   8889 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   8890 
   8891 		}
   8892 
   8893 		CSR_WRITE(sc, WMREG_MANC, manc);
   8894 	}
   8895 }
   8896 
   8897 static void
   8898 wm_release_manageability(struct wm_softc *sc)
   8899 {
   8900 
   8901 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8902 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8903 
   8904 		manc |= MANC_ARP_EN;
   8905 		if (sc->sc_type >= WM_T_82571)
   8906 			manc &= ~MANC_EN_MNG2HOST;
   8907 
   8908 		CSR_WRITE(sc, WMREG_MANC, manc);
   8909 	}
   8910 }
   8911 
/*
 * wm_get_wakeup:
 *
 *	Probe the management/wakeup capabilities of the chip and record
 *	them in sc->sc_flags: WM_F_HAS_AMT, WM_F_ARC_SUBSYS_VALID,
 *	WM_F_ASF_FIRMWARE_PRES and WM_F_HAS_MANAGE.
 */
static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_I354:
		/* A non-zero FWSM mode means the ARC subsystem is valid. */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		/* ICH/PCH always have AMT and ASF firmware. */
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flags is set after the resetting of the eeprom
	 * stuff
	 */
}
   8972 
   8973 #ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
/*
 * wm_enable_phy_wakeup:
 *
 *	PCH-style PHY-based wakeup setup.  NOT IMPLEMENTED: the whole
 *	body is compiled out (#if 0); the outline below documents the
 *	steps that still need to be written.
 */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}
   8996 
/*
 * wm_enable_wakeup:
 *
 *	Prepare the chip for wake-on-LAN before powering down:
 *	advertise the wakeup capability, apply the ICH/PCH workarounds,
 *	program the wakeup filter (magic packet), and request PME
 *	through PCI power management.
 */
static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	/* Nothing to do without a PCI power-management capability. */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if (child->mii_mpd_rev <= 2)
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	/* Wake on magic packet; multicast wakeup is compiled out. */
	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		/* PCH uses PHY-based wakeup (currently a stub). */
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	/* IGP3 PHYs need the D3 power-down workaround. */
	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		|| (sc->sc_type == WM_T_PCH2))
		    && (sc->sc_phytype == WMPHY_IGP_3))
			wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
   9080 #endif /* WM_WOL */
   9081 
/*
 * wm_suspend:
 *
 *	pmf suspend hook: release manageability and hardware control
 *	back to the firmware and, when WOL support is compiled in, arm
 *	the wakeup logic.  Always reports success.
 */
static bool
wm_suspend(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);

	wm_release_manageability(sc);
	wm_release_hw_control(sc);
#ifdef WM_WOL
	wm_enable_wakeup(sc);
#endif

	return true;
}
   9095 
/*
 * wm_resume:
 *
 *	pmf resume hook: re-establish management packet routing.
 *	Always reports success.
 */
static bool
wm_resume(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);

	wm_init_manageability(sc);

	return true;
}
   9105 
   9106 static void
   9107 wm_set_eee_i350(struct wm_softc * sc)
   9108 {
   9109 	uint32_t ipcnfg, eeer;
   9110 
   9111 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   9112 	eeer = CSR_READ(sc, WMREG_EEER);
   9113 
   9114 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   9115 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   9116 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   9117 		    | EEER_LPI_FC);
   9118 	} else {
   9119 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   9120 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   9121 		    | EEER_LPI_FC);
   9122 	}
   9123 
   9124 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   9125 	CSR_WRITE(sc, WMREG_EEER, eeer);
   9126 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   9127 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   9128 }
   9129